/*
 * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/oopMap.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/block.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/coalesce.hpp"
#include "opto/connode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/indexSet.hpp"
#include "opto/machnode.hpp"
#include "opto/memnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/rootnode.hpp"

#ifndef PRODUCT
void LRG::dump() const {
  ttyLocker ttyl;
  tty->print("%d ", num_regs());
  _mask.dump();
  if( _msize_valid ) {
    if( mask_size() == compute_mask_size() ) tty->print(", #%d ", _mask_size);
    else tty->print(", #!!!_%d_vs_%d ", _mask_size, _mask.Size());
  } else {
    tty->print(", #?(%d) ", _mask.Size());
  }

  tty->print("EffDeg: ");
  if( _degree_valid ) tty->print( "%d ", _eff_degree );
  else tty->print("? ");

  if( is_multidef() ) {
    tty->print("MultiDef ");
    if (_defs != NULL) {
      tty->print("(");
      for (int i = 0; i < _defs->length(); i++) {
        tty->print("N%d ", _defs->at(i)->_idx);
      }
      tty->print(") ");
    }
  }
  else if( _def == 0 ) tty->print("Dead ");
  else tty->print("Def: N%d ", _def->_idx);

  tty->print("Cost:%4.2g Area:%4.2g Score:%4.2g ", _cost, _area, score());
  // Flags
  if( _is_oop ) tty->print("Oop ");
  if( _is_float ) tty->print("Float ");
  if( _is_vector ) tty->print("Vector ");
  if( _was_spilled1 ) tty->print("Spilled ");
  if( _was_spilled2 ) tty->print("Spilled2 ");
  if( _direct_conflict ) tty->print("Direct_conflict ");
  if( _fat_proj ) tty->print("Fat ");
  if( _was_lo ) tty->print("Lo ");
  if( _has_copy ) tty->print("Copy ");
  if( _at_risk ) tty->print("Risk ");

  if( _must_spill ) tty->print("Must_spill ");
  if( _is_bound ) tty->print("Bound ");
  if( _msize_valid ) {
    if( _degree_valid && lo_degree() ) tty->print("Trivial ");
  }

  tty->cr();
}
#endif

// Compute score from cost and area. Low score is best to spill.
static double raw_score( double cost, double area ) {
  return cost - (area*RegisterCostAreaRatio) * 1.52588e-5;
}
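
// Worked example (illustrative only): the magic constant 1.52588e-5 is
// 1/65536, so a live range with _area == 65536.0 scores exactly
// _cost - RegisterCostAreaRatio; doubling the area subtracts that much again,
// making the live range an increasingly attractive spill candidate.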

double LRG::score() const {
  // Scale _area by RegisterCostAreaRatio/64K then subtract from cost.
  // Bigger area lowers score, encourages spilling this live range.
  // Bigger cost raises score, prevents spilling this live range.
  // (Note: 1/65536 is the magic constant below; I don't trust the C optimizer
  // to turn a divide by a constant into a multiply by the reciprocal).
  double score = raw_score( _cost, _area);

  // Account for area. Basically, LRGs covering large areas are better
  // to spill because more other LRGs get freed up.
  if( _area == 0.0 )            // No area? Then no progress to spill
    return 1e35;

  if( _was_spilled2 )           // If spilled once before, we are unlikely
    return score + 1e30;        // to make progress again.

  if( _cost >= _area*3.0 )      // Tiny area relative to cost
    return score + 1e17;        // Probably no progress to spill

  if( (_cost+_cost) >= _area*3.0 ) // Small area relative to cost
    return score + 1e10;        // Likely no progress to spill

  return score;
}

#define NUMBUCKS 3

// Straight out of Tarjan's union-find algorithm
uint LiveRangeMap::find_compress(uint lrg) {
  uint cur = lrg;
  uint next = _uf_map.at(cur);
  while (next != cur) {         // Scan chain of equivalences
    assert( next < cur, "always union smaller");
    cur = next;                 // until find a fixed-point
    next = _uf_map.at(cur);
  }

  // Core of union-find algorithm: update chain of
  // equivalences to be equal to the root.
  while (lrg != next) {
    uint tmp = _uf_map.at(lrg);
    _uf_map.at_put(lrg, next);
    lrg = tmp;
  }
  return lrg;
}

// Reset the Union-Find map to identity
void LiveRangeMap::reset_uf_map(uint max_lrg_id) {
  _max_lrg_id = max_lrg_id;
  // Force the Union-Find mapping to be at least this large
  _uf_map.at_put_grow(_max_lrg_id, 0);
  // Initialize it to be the ID mapping.
  for (uint i = 0; i < _max_lrg_id; ++i) {
    _uf_map.at_put(i, i);
  }
}

// Make all Nodes map directly to their final live range; no need for
// the Union-Find mapping after this call.
void LiveRangeMap::compress_uf_map_for_nodes() {
  // For all Nodes, compress mapping
  uint unique = _names.length();
  for (uint i = 0; i < unique; ++i) {
    uint lrg = _names.at(i);
    uint compressed_lrg = find(lrg);
    if (lrg != compressed_lrg) {
      _names.at_put(i, compressed_lrg);
    }
  }
}

// Like Find above, but no path compress, so bad asymptotic behavior
uint LiveRangeMap::find_const(uint lrg) const {
  if (!lrg) {
    return lrg; // Ignore the zero LRG
  }

  // Off the end? This happens during debugging dumps when you got
  // brand new live ranges but have not told the allocator yet.
  if (lrg >= _max_lrg_id) {
    return lrg;
  }

  uint next = _uf_map.at(lrg);
  while (next != lrg) { // Scan chain of equivalences
    assert(next < lrg, "always union smaller");
    lrg = next; // until find a fixed-point
    next = _uf_map.at(lrg);
  }
  return next;
}

PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher)
  : PhaseRegAlloc(unique, cfg, matcher,
#ifndef PRODUCT
       print_chaitin_statistics
#else
       NULL
#endif
       )
  , _lrg_map(Thread::current()->resource_area(), unique)
  , _live(0)
  , _spilled_once(Thread::current()->resource_area())
  , _spilled_twice(Thread::current()->resource_area())
  , _lo_degree(0), _lo_stk_degree(0), _hi_degree(0), _simplified(0)
  , _oldphi(unique)
#ifndef PRODUCT
  , _trace_spilling(TraceSpilling || C->method_has_option("TraceSpilling"))
#endif
{
  NOT_PRODUCT( Compile::TracePhase t3("ctorChaitin", &_t_ctorChaitin, TimeCompiler); )

  _high_frequency_lrg = MIN2(float(OPTO_LRG_HIGH_FREQ), _cfg.get_outer_loop_frequency());

  // Build a list of basic blocks, sorted by frequency
  _blks = NEW_RESOURCE_ARRAY(Block *, _cfg.number_of_blocks());
  // Experiment with sorting strategies to speed compilation
  double cutoff = BLOCK_FREQUENCY(1.0); // Cutoff for high frequency bucket
  Block **buckets[NUMBUCKS];            // Array of buckets
  uint    buckcnt[NUMBUCKS];            // Array of bucket counters
  double  buckval[NUMBUCKS];            // Array of bucket value cutoffs
  for (uint i = 0; i < NUMBUCKS; i++) {
    buckets[i] = NEW_RESOURCE_ARRAY(Block *, _cfg.number_of_blocks());
    buckcnt[i] = 0;
    // Bump by three orders of magnitude each time
    cutoff *= 0.001;
    buckval[i] = cutoff;
    for (uint j = 0; j < _cfg.number_of_blocks(); j++) {
      buckets[i][j] = NULL;
    }
  }
  // Sort blocks into buckets
  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    for (uint j = 0; j < NUMBUCKS; j++) {
      if ((j == NUMBUCKS - 1) || (_cfg.get_block(i)->_freq > buckval[j])) {
        // Assign block to end of list for appropriate bucket
        buckets[j][buckcnt[j]++] = _cfg.get_block(i);
        break; // kick out of inner loop
      }
    }
  }
  // Dump buckets into final block array
  uint blkcnt = 0;
  for (uint i = 0; i < NUMBUCKS; i++) {
    for (uint j = 0; j < buckcnt[i]; j++) {
      _blks[blkcnt++] = buckets[i][j];
    }
  }

  assert(blkcnt == _cfg.number_of_blocks(), "Block array not totally filled");
}
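
// The Union-Find map (_uf_map) always unions the larger live range id into
// the smaller one. Illustrative trace (not compiled): after Union of lrgs 4
// and 7 the map holds 7->4; a later Union of 2 and 4 yields 4->2, giving the
// chain 7->4->2, which find_compress() above collapses to 7->2 and 4->2.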

// union 2 sets together.
void PhaseChaitin::Union( const Node *src_n, const Node *dst_n ) {
  uint src = _lrg_map.find(src_n);
  uint dst = _lrg_map.find(dst_n);
  assert(src, "");
  assert(dst, "");
  assert(src < _lrg_map.max_lrg_id(), "oob");
  assert(dst < _lrg_map.max_lrg_id(), "oob");
  assert(src < dst, "always union smaller");
  _lrg_map.uf_map(dst, src);
}

void PhaseChaitin::new_lrg(const Node *x, uint lrg) {
  // Make the Node->LRG mapping
  _lrg_map.extend(x->_idx, lrg);
  // Make the Union-Find mapping an identity function
  _lrg_map.uf_extend(lrg, lrg);
}


int PhaseChaitin::clone_projs(Block* b, uint idx, Node* orig, Node* copy, uint& max_lrg_id) {
  assert(b->find_node(copy) == (idx - 1), "incorrect insert index for copy kill projections");
  DEBUG_ONLY( Block* borig = _cfg.get_block_for_node(orig); )
  int found_projs = 0;
  uint cnt = orig->outcnt();
  for (uint i = 0; i < cnt; i++) {
    Node* proj = orig->raw_out(i);
    if (proj->is_MachProj()) {
      assert(proj->outcnt() == 0, "only kill projections are expected here");
      assert(_cfg.get_block_for_node(proj) == borig, "incorrect block for kill projections");
      found_projs++;
      // Copy kill projections after the cloned node
      Node* kills = proj->clone();
      kills->set_req(0, copy);
      b->insert_node(kills, idx++);
      _cfg.map_node_to_block(kills, b);
      new_lrg(kills, max_lrg_id++);
    }
  }
  return found_projs;
}

// Renumber the live ranges to compact them. Makes the IFG smaller.
void PhaseChaitin::compact() {
  // Currently the _uf_map contains a series of short chains which are headed
  // by a self-cycle. All the chains run from big numbers to little numbers.
  // The Find() call chases the chains & shortens them for the next Find call.
  // We are going to change this structure slightly. Numbers above a moving
  // wave 'i' are unchanged. Numbers below 'j' point directly to their
  // compacted live range with no further chaining. There are no chains or
  // cycles below 'i', so the Find call no longer works.
  uint j = 1;
  uint i;
  for (i = 1; i < _lrg_map.max_lrg_id(); i++) {
    uint lr = _lrg_map.uf_live_range_id(i);
    // Ignore unallocated live ranges
    if (!lr) {
      continue;
    }
    assert(lr <= i, "");
    _lrg_map.uf_map(i, ( lr == i ) ? j++ : _lrg_map.uf_live_range_id(lr));
  }
  // Now change the Node->LR mapping to reflect the compacted names
  uint unique = _lrg_map.size();
  for (i = 0; i < unique; i++) {
    uint lrg_id = _lrg_map.live_range_id(i);
    _lrg_map.map(i, _lrg_map.uf_live_range_id(lrg_id));
  }

  // Reset the Union-Find mapping
  _lrg_map.reset_uf_map(j);
}

void PhaseChaitin::Register_Allocate() {

  // Above the OLD FP (and in registers) are the incoming arguments. Stack
  // slots in this area are called "arg_slots". Above the NEW FP (and in
  // registers) is the outgoing argument area; above that is the spill/temp
  // area. These are all "frame_slots". Arg_slots start at the zero
  // stack_slots and count up to the known arg_size. Frame_slots start at
  // the stack_slot #arg_size and go up. After allocation I map stack
  // slots to actual offsets. Stack-slots in the arg_slot area are biased
  // by the frame_size; stack-slots in the frame_slot area are biased by 0.
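  //
  // Illustrative sketch derived from the comment above (addresses grow
  // upward; not normative for any particular platform):
  //
  //     incoming args      <- arg_slots [0, arg_size), biased by frame_size
  //   OLD FP
  //     spill/temp area    \
  //     outgoing args       > frame_slots [arg_size, ...), biased by 0
  //   NEW FP               /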

  _trip_cnt = 0;
  _alternate = 0;
  _matcher._allocation_started = true;

  ResourceArea split_arena;     // Arena for Split local resources
  ResourceArea live_arena;      // Arena for liveness & IFG info
  ResourceMark rm(&live_arena);

  // Need live-ness for the IFG; need the IFG for coalescing. If the
  // liveness is JUST for coalescing, then I can get some mileage by renaming
  // all copy-related live ranges low and then using the max copy-related
  // live range as a cut-off for LIVE and the IFG. In other words, I can
  // build a subset of LIVE and IFG just for copies.
  PhaseLive live(_cfg, _lrg_map.names(), &live_arena);

  // Need IFG for coalescing and coloring
  PhaseIFG ifg(&live_arena);
  _ifg = &ifg;

  // Come out of SSA world to the Named world. Assign (virtual) registers to
  // Nodes. Use the same register for all inputs and the output of PhiNodes
  // - effectively ending SSA form. This requires either coalescing live
  // ranges or inserting copies. For the moment, we insert "virtual copies"
  // - we pretend there is a copy prior to each Phi in predecessor blocks.
  // We will attempt to coalesce such "virtual copies" before we manifest
  // them for real.
  de_ssa();

#ifdef ASSERT
  // Verify the graph before RA.
  verify(&live_arena);
#endif

  {
    NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
    _live = NULL;               // Mark live as being not available
    rm.reset_to_mark();         // Reclaim working storage
    IndexSet::reset_memory(C, &live_arena);
    ifg.init(_lrg_map.max_lrg_id()); // Empty IFG
    gather_lrg_masks( false );  // Collect LRG masks
    live.compute(_lrg_map.max_lrg_id()); // Compute liveness
    _live = &live;              // Mark LIVE as being available
  }

  // Base pointers are currently "used" by instructions which define new
  // derived pointers. This makes base pointers live up to where the
  // derived pointer is made, but not beyond. Really, they need to be live
  // across any GC point where the derived value is live. So this code looks
  // at all the GC points, and "stretches" the live range of any base pointer
  // to the GC point.
  if (stretch_base_pointer_live_ranges(&live_arena)) {
    NOT_PRODUCT(Compile::TracePhase t3("computeLive (sbplr)", &_t_computeLive, TimeCompiler);)
    // Since some live range stretched, I need to recompute live
    _live = NULL;
    rm.reset_to_mark();         // Reclaim working storage
    IndexSet::reset_memory(C, &live_arena);
    ifg.init(_lrg_map.max_lrg_id());
    gather_lrg_masks(false);
    live.compute(_lrg_map.max_lrg_id());
    _live = &live;
  }
  // Create the interference graph using virtual copies
  build_ifg_virtual();          // Include stack slots this time

  // Aggressive (but pessimistic) copy coalescing.
  // This pass works on virtual copies. Any virtual copies which are not
  // coalesced get manifested as actual copies
  {
    // The IFG is/was triangular. I am 'squaring it up' so Union can run
    // faster. Union requires a 'for all' operation which is slow on the
    // triangular adjacency matrix (quick reminder: the IFG is 'sparse' -
    // meaning I can visit all the Nodes neighbors less than a Node in time
    // O(# of neighbors), but I have to visit all the Nodes greater than a
    // given Node and search them for an instance, i.e., time O(#MaxLRG)).
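    // Sketch (illustrative): in the triangular form an edge between lrgs 4
    // and 9 is stored only in the adjacency set of 9 (the larger id); after
    // SquareUp() it appears under both 4 and 9, so either endpoint can
    // enumerate its neighbors in O(# of neighbors).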
    _ifg->SquareUp();

    PhaseAggressiveCoalesce coalesce(*this);
    coalesce.coalesce_driver();
    // Insert un-coalesced copies. Visit all Phis. Where inputs to a Phi do
    // not match the Phi itself, insert a copy.
    coalesce.insert_copies(_matcher);
    if (C->failing()) {
      return;
    }
  }

  // After aggressive coalesce, attempt a first cut at coloring.
  // To color, we need the IFG and for that we need LIVE.
  {
    NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
    _live = NULL;
    rm.reset_to_mark();         // Reclaim working storage
    IndexSet::reset_memory(C, &live_arena);
    ifg.init(_lrg_map.max_lrg_id());
    gather_lrg_masks( true );
    live.compute(_lrg_map.max_lrg_id());
    _live = &live;
  }

  // Build physical interference graph
  uint must_spill = 0;
  must_spill = build_ifg_physical(&live_arena);
  // If we have a guaranteed spill, might as well spill now
  if (must_spill) {
    if (!_lrg_map.max_lrg_id()) {
      return;
    }
    // Bail out if unique gets too large (ie - unique > MaxNodeLimit)
    C->check_node_count(10*must_spill, "out of nodes before split");
    if (C->failing()) {
      return;
    }

    uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena); // Split spilling LRG everywhere
    _lrg_map.set_max_lrg_id(new_max_lrg_id);
    // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
    // or we failed to split
    C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after physical split");
    if (C->failing()) {
      return;
    }

    NOT_PRODUCT(C->verify_graph_edges();)

    compact();                  // Compact LRGs; return new lower max lrg

    {
      NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
      _live = NULL;
      rm.reset_to_mark();       // Reclaim working storage
      IndexSet::reset_memory(C, &live_arena);
      ifg.init(_lrg_map.max_lrg_id()); // Build a new interference graph
      gather_lrg_masks( true ); // Collect intersect mask
      live.compute(_lrg_map.max_lrg_id()); // Compute LIVE
      _live = &live;
    }
    build_ifg_physical(&live_arena);
    _ifg->SquareUp();
    _ifg->Compute_Effective_Degree();
    // Only do conservative coalescing if requested
    if (OptoCoalesce) {
      // Conservative (and pessimistic) copy coalescing of those spills
      PhaseConservativeCoalesce coalesce(*this);
      // If max live ranges greater than cutoff, don't color the stack.
      // This cutoff can be larger than below since it is only done once.
      coalesce.coalesce_driver();
    }
    _lrg_map.compress_uf_map_for_nodes();

#ifdef ASSERT
    verify(&live_arena, true);
#endif
  } else {
    ifg.SquareUp();
    ifg.Compute_Effective_Degree();
#ifdef ASSERT
    set_was_low();
#endif
  }

  // Prepare for Simplify & Select
  cache_lrg_info();             // Count degree of LRGs

  // Simplify the InterFerence Graph by removing LRGs of low degree.
  // LRGs of low degree are trivially colorable.
  Simplify();

  // Select colors by re-inserting LRGs back into the IFG in reverse order.
  // Return whether or not something spills.
  uint spills = Select( );

  // If we spill, split and recycle the entire thing
  while( spills ) {
    if( _trip_cnt++ > 24 ) {
      DEBUG_ONLY( dump_for_spill_split_recycle(); )
      if( _trip_cnt > 27 ) {
        C->record_method_not_compilable("failed spill-split-recycle sanity check");
        return;
      }
    }

    if (!_lrg_map.max_lrg_id()) {
      return;
    }
    uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena); // Split spilling LRG everywhere
    _lrg_map.set_max_lrg_id(new_max_lrg_id);
    // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
    C->check_node_count(2 * NodeLimitFudgeFactor, "out of nodes after split");
    if (C->failing()) {
      return;
    }

    compact();                  // Compact LRGs; return new lower max lrg

    // Nuke the live-ness and interference graph and LiveRanGe info
    {
      NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
      _live = NULL;
      rm.reset_to_mark();       // Reclaim working storage
      IndexSet::reset_memory(C, &live_arena);
      ifg.init(_lrg_map.max_lrg_id());

      // Create LiveRanGe array.
      // Intersect register masks for all USEs and DEFs
      gather_lrg_masks(true);
      live.compute(_lrg_map.max_lrg_id());
      _live = &live;
    }
    must_spill = build_ifg_physical(&live_arena);
    _ifg->SquareUp();
    _ifg->Compute_Effective_Degree();

    // Only do conservative coalescing if requested
    if (OptoCoalesce) {
      // Conservative (and pessimistic) copy coalescing
      PhaseConservativeCoalesce coalesce(*this);
      // Check for few live ranges determines how aggressive coalesce is.
      coalesce.coalesce_driver();
    }
    _lrg_map.compress_uf_map_for_nodes();
#ifdef ASSERT
    verify(&live_arena, true);
#endif
    cache_lrg_info();           // Count degree of LRGs

    // Simplify the InterFerence Graph by removing LRGs of low degree.
    // LRGs of low degree are trivially colorable.
    Simplify();

    // Select colors by re-inserting LRGs back into the IFG in reverse order.
    // Return whether or not something spills.
    spills = Select();
  }

  // Count number of Simplify-Select trips per coloring success.
  _allocator_attempts += _trip_cnt + 1;
  _allocator_successes += 1;

  // Peephole remove copies
  post_allocate_copy_removal();

  // Merge multidefs if multiple defs representing the same value are used in a single block.
  merge_multidefs();

#ifdef ASSERT
  // Verify the graph after RA.
  verify(&live_arena);
#endif

  // max_reg is past the largest *register* used.
  // Convert that to a frame_slot number.
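  // Example (illustrative): if the highest color used was stack slot
  // _new_SP + 20, the frame needs 20 slots before alignment; if everything
  // colored into machine registers (_max_reg <= _new_SP), only the
  // out-preserve area below remains.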
  if (_max_reg <= _matcher._new_SP) {
    _framesize = C->out_preserve_stack_slots();
  } else {
    _framesize = _max_reg - _matcher._new_SP;
  }
  assert((int)(_matcher._new_SP+_framesize) >= (int)_matcher._out_arg_limit, "framesize must be large enough");

  // This frame must preserve the required fp alignment
  _framesize = round_to(_framesize, Matcher::stack_alignment_in_slots());
  assert( _framesize >= 0 && _framesize <= 1000000, "sanity check" );
#ifndef PRODUCT
  _total_framesize += _framesize;
  if ((int)_framesize > _max_framesize) {
    _max_framesize = _framesize;
  }
#endif

  // Convert CISC spills
  fixup_spills();

  // Log regalloc results
  CompileLog* log = Compile::current()->log();
  if (log != NULL) {
    log->elem("regalloc attempts='%d' success='%d'", _trip_cnt, !C->failing());
  }

  if (C->failing()) {
    return;
  }

  NOT_PRODUCT(C->verify_graph_edges();)

  // Move important info out of the live_arena to longer lasting storage.
  alloc_node_regs(_lrg_map.size());
  for (uint i = 0; i < _lrg_map.size(); i++) {
    if (_lrg_map.live_range_id(i)) { // Live range associated with Node?
      LRG &lrg = lrgs(_lrg_map.live_range_id(i));
      if (!lrg.alive()) {
        set_bad(i);
      } else if (lrg.num_regs() == 1) {
        set1(i, lrg.reg());
      } else {                  // Must be a register-set
        if (!lrg._fat_proj) {   // Must be aligned adjacent register set
          // Live ranges record the highest register in their mask.
          // We want the low register for the AD file writer's convenience.
          OptoReg::Name hi = lrg.reg(); // Get hi register
          OptoReg::Name lo = OptoReg::add(hi, (1-lrg.num_regs())); // Find lo
          // We have to use pair [lo,lo+1] even for wide vectors because
          // the rest of code generation works only with pairs. It is safe
          // since for registers encoding only 'lo' is used.
          // Second reg from pair is used in ScheduleAndBundle on SPARC where
          // vector max size is 8 which corresponds to registers pair.
          // It is also used in BuildOopMaps but oop operations are not
          // vectorized.
          set2(i, lo);
        } else {                // Misaligned; extract 2 bits
          OptoReg::Name hi = lrg.reg(); // Get hi register
          lrg.Remove(hi);       // Yank from mask
          int lo = lrg.mask().find_first_elem(); // Find lo
          set_pair(i, hi, lo);
        }
      }
      if( lrg._is_oop ) _node_oops.set(i);
    } else {
      set_bad(i);
    }
  }

  // Done!
  _live = NULL;
  _ifg = NULL;
  C->set_indexSet_arena(NULL);  // ResourceArea is at end of scope
}

void PhaseChaitin::de_ssa() {
  // Set initial Names for all Nodes. Most Nodes get the virtual register
  // number. A few get the ZERO live range number. These do not
  // get allocated, but instead rely on correct scheduling to ensure that
  // only one instance is simultaneously live at a time.
  uint lr_counter = 1;
  for( uint i = 0; i < _cfg.number_of_blocks(); i++ ) {
    Block* block = _cfg.get_block(i);
    uint cnt = block->number_of_nodes();

    // Handle all the normal Nodes in the block
    for( uint j = 0; j < cnt; j++ ) {
      Node *n = block->get_node(j);
      // Pre-color to the zero live range, or pick virtual register
      const RegMask &rm = n->out_RegMask();
      _lrg_map.map(n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0);
    }
  }

  // Reset the Union-Find mapping to be identity
  _lrg_map.reset_uf_map(lr_counter);
}
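
// Illustrative example (node numbers hypothetical): after de_ssa() the nodes
// of a block might map as N12->lrg 5, N13->lrg 6, N14->lrg 0 (empty output
// register mask, e.g. a memory-only node). Phis and their inputs still carry
// distinct lrgs; the "virtual copies" discussed in Register_Allocate() are
// implied rather than present as actual copy nodes.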

// Gather LiveRanGe information, including register masks. Modification of
// cisc spillable in_RegMasks should not be done before AggressiveCoalesce.
void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {

  // Nail down the frame pointer live range
  uint fp_lrg = _lrg_map.live_range_id(_cfg.get_root_node()->in(1)->in(TypeFunc::FramePtr));
  lrgs(fp_lrg)._cost += 1e12;   // Cost is infinite

  // For all blocks
  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* block = _cfg.get_block(i);

    // For all instructions
    for (uint j = 1; j < block->number_of_nodes(); j++) {
      Node* n = block->get_node(j);
      uint input_edge_start = 1; // Skip the control edge for most nodes
      if (n->is_Mach()) {
        input_edge_start = n->as_Mach()->oper_input_base();
      }
      uint idx = n->is_Copy();

      // Get virtual register number, same as LiveRanGe index
      uint vreg = _lrg_map.live_range_id(n);
      LRG& lrg = lrgs(vreg);
      if (vreg) {               // No vreg means un-allocable (e.g. memory)

        // Collect has-copy bit
        if (idx) {
          lrg._has_copy = 1;
          uint clidx = _lrg_map.live_range_id(n->in(idx));
          LRG& copy_src = lrgs(clidx);
          copy_src._has_copy = 1;
        }

        // Check for float-vs-int live range (used in register-pressure
        // calculations)
        const Type *n_type = n->bottom_type();
        if (n_type->is_floatingpoint()) {
          lrg._is_float = 1;
        }

        // Check for twice prior spilling. Once prior spilling might have
        // spilled 'soft', 2nd prior spill should have spilled 'hard' and
        // further spilling is unlikely to make progress.
        if (_spilled_once.test(n->_idx)) {
          lrg._was_spilled1 = 1;
          if (_spilled_twice.test(n->_idx)) {
            lrg._was_spilled2 = 1;
          }
        }

#ifndef PRODUCT
        if (trace_spilling() && lrg._def != NULL) {
          // collect defs for MultiDef printing
          if (lrg._defs == NULL) {
            lrg._defs = new (_ifg->_arena) GrowableArray<Node*>(_ifg->_arena, 2, 0, NULL);
            lrg._defs->append(lrg._def);
          }
          lrg._defs->append(n);
        }
#endif

        // Check for a single def LRG; these can spill nicely
        // via rematerialization. Flag as NULL for no def found
        // yet, or 'n' for single def or -1 for many defs.
        lrg._def = lrg._def ? NodeSentinel : n;

        // Limit result register mask to acceptable registers
        const RegMask &rm = n->out_RegMask();
        lrg.AND( rm );

        int ireg = n->ideal_reg();
        assert( !n->bottom_type()->isa_oop_ptr() || ireg == Op_RegP,
                "oops must be in Op_RegP's" );

        // Check for vector live range (only if vector register is used).
        // On SPARC vector uses RegD which could be misaligned so it is not
        // processed as a vector in RA.
        if (RegMask::is_vector(ireg))
          lrg._is_vector = 1;
        assert(n_type->isa_vect() == NULL || lrg._is_vector || ireg == Op_RegD || ireg == Op_RegL,
               "vector must be in vector registers");

        // Check for bound register masks
        const RegMask &lrgmask = lrg.mask();
        if (lrgmask.is_bound(ireg)) {
          lrg._is_bound = 1;
        }

        // Check for maximum frequency value
        if (lrg._maxfreq < block->_freq) {
          lrg._maxfreq = block->_freq;
        }

        // Check for oop-iness, or long/double
        // Check for multi-kill projection
        switch (ireg) {
        case MachProjNode::fat_proj:
          // Fat projections have size equal to number of registers killed
          lrg.set_num_regs(rm.Size());
          lrg.set_reg_pressure(lrg.num_regs());
          lrg._fat_proj = 1;
          lrg._is_bound = 1;
          break;
        case Op_RegP:
#ifdef _LP64
          lrg.set_num_regs(2);  // Size is 2 stack words
#else
          lrg.set_num_regs(1);  // Size is 1 stack word
#endif
          // Register pressure is tracked relative to the maximum values
          // suggested for that platform, INTPRESSURE and FLOATPRESSURE,
          // and relative to other types which compete for the same regs.
          //
          // The following table contains suggested values based on the
          // architectures as defined in each .ad file.
          // INTPRESSURE and FLOATPRESSURE may be tuned differently for
          // compile-speed or performance.
          // Note1:
          // SPARC and SPARCV9 reg_pressures are at 2 instead of 1
          // since .ad registers are defined as high and low halves.
          // These reg_pressure values remain compatible with the code
          // in is_high_pressure() which relates get_invalid_mask_size(),
          // Block::_reg_pressure and INTPRESSURE, FLOATPRESSURE.
          // Note2:
          // SPARC -d32 has 24 registers available for integral values,
          // but only 10 of these are safe for 64-bit longs.
          // Using set_reg_pressure(2) for both int and long means
          // the allocator will believe it can fit 26 longs into
          // registers. Using 2 for longs and 1 for ints means the
          // allocator will attempt to put 52 integers into registers.
          // The settings below limit this problem to methods with
          // many long values which are being run on 32-bit SPARC.
          //
          // ------------------- reg_pressure --------------------
          // Each entry is reg_pressure_per_value,number_of_regs
          //          RegL  RegI  RegFlags  RegF  RegD  INTPRESSURE  FLOATPRESSURE
          // IA32       2     1      1       1     1       6            6
          // IA64       1     1      1       1     1      50           41
          // SPARC      2     2      2       2     2      48 (24)      52 (26)
          // SPARCV9    2     2      2       2     2      48 (24)      52 (26)
          // AMD64      1     1      1       1     1      14           15
          // -----------------------------------------------------
#if defined(SPARC)
          lrg.set_reg_pressure(2); // use for v9 as well
#else
          lrg.set_reg_pressure(1); // normally one value per register
#endif
          if( n_type->isa_oop_ptr() ) {
            lrg._is_oop = 1;
          }
          break;
        case Op_RegL:           // Check for long or double
        case Op_RegD:
          lrg.set_num_regs(2);
          // Define platform specific register pressure
#if defined(SPARC) || defined(ARM32)
          lrg.set_reg_pressure(2);
#elif defined(IA32)
          if( ireg == Op_RegL ) {
            lrg.set_reg_pressure(2);
          } else {
            lrg.set_reg_pressure(1);
          }
#else
          lrg.set_reg_pressure(1); // normally one value per register
#endif
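          // Example (illustrative): on IA32 a RegL occupies an integer
          // register pair, so it is counted with pressure 2 above, while a
          // RegD fits a single FPU/XMM register and counts 1 -- matching the
          // RegL/RegD columns in the table.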
          // If this def of a double forces a mis-aligned double,
          // flag as '_fat_proj' - really flag as allowing misalignment
          // AND changes how we count interferences. A mis-aligned
          // double can interfere with TWO aligned pairs, or effectively
          // FOUR registers!
          if (rm.is_misaligned_pair()) {
            lrg._fat_proj = 1;
            lrg._is_bound = 1;
          }
          break;
        case Op_RegF:
        case Op_RegI:
        case Op_RegN:
        case Op_RegFlags:
        case 0:                 // not an ideal register
          lrg.set_num_regs(1);
#ifdef SPARC
          lrg.set_reg_pressure(2);
#else
          lrg.set_reg_pressure(1);
#endif
          break;
        case Op_VecS:
          assert(Matcher::vector_size_supported(T_BYTE,4), "sanity");
          assert(RegMask::num_registers(Op_VecS) == RegMask::SlotsPerVecS, "sanity");
          lrg.set_num_regs(RegMask::SlotsPerVecS);
          lrg.set_reg_pressure(1);
          break;
        case Op_VecD:
          assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecD), "sanity");
          assert(RegMask::num_registers(Op_VecD) == RegMask::SlotsPerVecD, "sanity");
          assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecD), "vector should be aligned");
          lrg.set_num_regs(RegMask::SlotsPerVecD);
          lrg.set_reg_pressure(1);
          break;
        case Op_VecX:
          assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecX), "sanity");
          assert(RegMask::num_registers(Op_VecX) == RegMask::SlotsPerVecX, "sanity");
          assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecX), "vector should be aligned");
          lrg.set_num_regs(RegMask::SlotsPerVecX);
          lrg.set_reg_pressure(1);
          break;
        case Op_VecY:
          assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecY), "sanity");
          assert(RegMask::num_registers(Op_VecY) == RegMask::SlotsPerVecY, "sanity");
          assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecY), "vector should be aligned");
          lrg.set_num_regs(RegMask::SlotsPerVecY);
          lrg.set_reg_pressure(1);
          break;
        default:
          ShouldNotReachHere();
        }
      }

      // Now do the same for inputs
      uint cnt = n->req();
      // Setup for CISC SPILLING
      uint inp = (uint)AdlcVMDeps::Not_cisc_spillable;
      if( UseCISCSpill && after_aggressive ) {
        inp = n->cisc_operand();
        if( inp != (uint)AdlcVMDeps::Not_cisc_spillable )
          // Convert operand number to edge index number
          inp = n->as_Mach()->operand_index(inp);
      }
      // Prepare register mask for each input
      for( uint k = input_edge_start; k < cnt; k++ ) {
        uint vreg = _lrg_map.live_range_id(n->in(k));
        if (!vreg) {
          continue;
        }

        // If this instruction is CISC Spillable, add the flags
        // bit to its appropriate input
        if( UseCISCSpill && after_aggressive && inp == k ) {
#ifndef PRODUCT
          if( TraceCISCSpill ) {
            tty->print(" use_cisc_RegMask: ");
            n->dump();
          }
#endif
          n->as_Mach()->use_cisc_RegMask();
        }

        LRG &lrg = lrgs(vreg);
        // // Testing for floating point code shape
        // Node *test = n->in(k);
        // if( test->is_Mach() ) {
        //   MachNode *m = test->as_Mach();
        //   int op = m->ideal_Opcode();
        //   if (n->is_Call() && (op == Op_AddF || op == Op_MulF) ) {
        //     int zzz = 1;
        //   }
        // }

        // Limit result register mask to acceptable registers.
        // Do not limit registers from uncommon uses before
        // AggressiveCoalesce. This effectively pre-virtual-splits
        // around uncommon uses of common defs.
        const RegMask &rm = n->in_RegMask(k);
        if (!after_aggressive && _cfg.get_block_for_node(n->in(k))->_freq > 1000 * block->_freq) {
          // Since we are BEFORE aggressive coalesce, leave the register
          // mask untrimmed by the call. This encourages more coalescing.
          // Later, AFTER aggressive, this live range will have to spill
          // but the spiller handles slow-path calls very nicely.
        } else {
          lrg.AND( rm );
        }

        // Check for bound register masks
        const RegMask &lrgmask = lrg.mask();
        int kreg = n->in(k)->ideal_reg();
        bool is_vect = RegMask::is_vector(kreg);
        assert(n->in(k)->bottom_type()->isa_vect() == NULL ||
               is_vect || kreg == Op_RegD || kreg == Op_RegL,
               "vector must be in vector registers");
        if (lrgmask.is_bound(kreg))
          lrg._is_bound = 1;

        // If this use of a double forces a mis-aligned double,
        // flag as '_fat_proj' - really flag as allowing misalignment
        // AND changes how we count interferences. A mis-aligned
        // double can interfere with TWO aligned pairs, or effectively
        // FOUR registers!
#ifdef ASSERT
        if (is_vect) {
          assert(lrgmask.is_aligned_sets(lrg.num_regs()), "vector should be aligned");
          assert(!lrg._fat_proj, "sanity");
          assert(RegMask::num_registers(kreg) == lrg.num_regs(), "sanity");
        }
#endif
        if (!is_vect && lrg.num_regs() == 2 && !lrg._fat_proj && rm.is_misaligned_pair()) {
          lrg._fat_proj = 1;
          lrg._is_bound = 1;
        }
        // if the LRG is an unaligned pair, we will have to spill
        // so clear the LRG's register mask if it is not already spilled
        if (!is_vect && !n->is_SpillCopy() &&
            (lrg._def == NULL || lrg.is_multidef() || !lrg._def->is_SpillCopy()) &&
            lrgmask.is_misaligned_pair()) {
          lrg.Clear();
        }

        // Check for maximum frequency value
        if (lrg._maxfreq < block->_freq) {
          lrg._maxfreq = block->_freq;
        }

      } // End for all allocated inputs
    } // end for all instructions
  } // end for all blocks

  // Final per-liverange setup
  for (uint i2 = 0; i2 < _lrg_map.max_lrg_id(); i2++) {
    LRG &lrg = lrgs(i2);
    assert(!lrg._is_vector || !lrg._fat_proj, "sanity");
    if (lrg.num_regs() > 1 && !lrg._fat_proj) {
      lrg.clear_to_sets();
    }
    lrg.compute_set_mask_size();
    if (lrg.not_free()) {       // Handle case where we lose from the start
      lrg.set_reg(OptoReg::Name(LRG::SPILL_REG));
      lrg._direct_conflict = 1;
    }
    lrg.set_degree(0);          // no neighbors in IFG yet
  }
}

// Set the was-lo-degree bit. Conservative coalescing should not change the
// colorability of the graph. If any live range was of low-degree before
// coalescing, it should Simplify. This call sets the was-lo-degree bit.
// The bit is checked in Simplify.
void PhaseChaitin::set_was_low() {
#ifdef ASSERT
  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
    int size = lrgs(i).num_regs();
    uint old_was_lo = lrgs(i)._was_lo;
    lrgs(i)._was_lo = 0;
    if( lrgs(i).lo_degree() ) {
      lrgs(i)._was_lo = 1;      // Trivially of low degree
    } else {                    // Else check the Briggs assertion
      // Briggs' observation is that the lo-degree neighbors of a
      // hi-degree live range will not interfere with the color choices
      // of said hi-degree live range. The Simplify reverse-stack-coloring
      // order takes care of the details. Hence you do not have to count
      // low-degree neighbors when determining if this guy colors.
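      // Example (illustrative): a hi-degree lrg needing one register with
      // ten neighbors, eight of them lo-degree, sums briggs_degree over only
      // the two hi-degree neighbors; if that sum is below its
      // degrees_of_freedom it is guaranteed to color anyway.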
      int briggs_degree = 0;
      IndexSet *s = _ifg->neighbors(i);
      IndexSetIterator elements(s);
      uint lidx;
      while((lidx = elements.next()) != 0) {
        if( !lrgs(lidx).lo_degree() )
          briggs_degree += MAX2(size, lrgs(lidx).num_regs());
      }
      if( briggs_degree < lrgs(i).degrees_of_freedom() )
        lrgs(i)._was_lo = 1;    // Low degree via the Briggs assertion
    }
    assert(old_was_lo <= lrgs(i)._was_lo, "_was_lo may not decrease");
  }
#endif
}

#define REGISTER_CONSTRAINED 16

// Compute cost/area ratio, in case we spill. Build the lo-degree list.
void PhaseChaitin::cache_lrg_info( ) {

  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
    LRG &lrg = lrgs(i);

    // Check for being of low degree: means we can be trivially colored.
    // Low degree, dead or must-spill guys just get to simplify right away
    if( lrg.lo_degree() ||
        !lrg.alive() ||
        lrg._must_spill ) {
      // Split low degree list into those guys that must get a
      // register and those that can go to register or stack.
      // The idea is LRGs that can go register or stack color first when
      // they have a good chance of getting a register. The register-only
      // lo-degree live ranges always get a register.
      OptoReg::Name hi_reg = lrg.mask().find_last_elem();
      if( OptoReg::is_stack(hi_reg)) { // Can go to stack?
        lrg._next = _lo_stk_degree;
        _lo_stk_degree = i;
      } else {
        lrg._next = _lo_degree;
        _lo_degree = i;
      }
    } else {                    // Else high degree
      lrgs(_hi_degree)._prev = i;
      lrg._next = _hi_degree;
      lrg._prev = 0;
      _hi_degree = i;
    }
  }
}

// Simplify the IFG by removing LRGs of low degree that have NO copies
void PhaseChaitin::Pre_Simplify( ) {

  // Warm up the lo-degree no-copy list
  int lo_no_copy = 0;
  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
    if ((lrgs(i).lo_degree() && !lrgs(i)._has_copy) ||
        !lrgs(i).alive() ||
        lrgs(i)._must_spill) {
      lrgs(i)._next = lo_no_copy;
      lo_no_copy = i;
    }
  }

  while( lo_no_copy ) {
    uint lo = lo_no_copy;
    lo_no_copy = lrgs(lo)._next;
    int size = lrgs(lo).num_regs();

    // Put the simplified guy on the simplified list.
    lrgs(lo)._next = _simplified;
    _simplified = lo;

    // Yank this guy from the IFG.
    IndexSet *adj = _ifg->remove_node( lo );

    // If any neighbors' degrees fall below their number of
    // allowed registers, then put that neighbor on the low degree
    // list. Note that 'degree' can only fall and 'numregs' is
    // unchanged by this action. Thus the two are equal at most once,
    // so LRGs hit the lo-degree worklists at most once.
    IndexSetIterator elements(adj);
    uint neighbor;
    while ((neighbor = elements.next()) != 0) {
      LRG *n = &lrgs(neighbor);
      assert( _ifg->effective_degree(neighbor) == n->degree(), "" );

      // Check for just becoming of-low-degree
      if( n->just_lo_degree() && !n->_has_copy ) {
        assert(!(*_ifg->_yanked)[neighbor], "Cannot move to lo degree twice");
        // Put on lo-degree list
        n->_next = lo_no_copy;
        lo_no_copy = neighbor;
      }
    }
  } // End of while lo-degree no_copy worklist not empty

  // No more lo-degree no-copy live ranges to simplify
}
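
// Note on the worklists (sketch): _lo_degree, _lo_stk_degree, _hi_degree and
// _simplified are intrusive lists threaded through LRG::_next (and _prev for
// the hi-degree list), so moving a live range between worklists is O(1)
// pointer surgery.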

// Simplify the IFG by removing LRGs of low degree.
void PhaseChaitin::Simplify( ) {

  while( 1 ) {                  // Repeat till simplified it all
    // May want to explore simplifying lo_degree before _lo_stk_degree.
    // This might result in more spills coloring into registers during
    // Select().
    while( _lo_degree || _lo_stk_degree ) {
      // If possible, pull from lo_stk first
      uint lo;
      if( _lo_degree ) {
        lo = _lo_degree;
        _lo_degree = lrgs(lo)._next;
      } else {
        lo = _lo_stk_degree;
        _lo_stk_degree = lrgs(lo)._next;
      }

      // Put the simplified guy on the simplified list.
      lrgs(lo)._next = _simplified;
      _simplified = lo;
      // If this guy is "at risk" then mark his current neighbors
      if( lrgs(lo)._at_risk ) {
        IndexSetIterator elements(_ifg->neighbors(lo));
        uint datum;
        while ((datum = elements.next()) != 0) {
          lrgs(datum)._risk_bias = lo;
        }
      }

      // Yank this guy from the IFG.
      IndexSet *adj = _ifg->remove_node( lo );

      // If any neighbors' degrees fall below their number of
      // allowed registers, then put that neighbor on the low degree
      // list. Note that 'degree' can only fall and 'numregs' is
      // unchanged by this action. Thus the two are equal at most once,
      // so LRGs hit the lo-degree worklist at most once.
      IndexSetIterator elements(adj);
      uint neighbor;
      while ((neighbor = elements.next()) != 0) {
        LRG *n = &lrgs(neighbor);
#ifdef ASSERT
        if( VerifyOpto || VerifyRegisterAllocator ) {
          assert( _ifg->effective_degree(neighbor) == n->degree(), "" );
        }
#endif

        // Check for just becoming of-low-degree just counting registers.
        // _must_spill live ranges are already on the low degree list.
        if( n->just_lo_degree() && !n->_must_spill ) {
          assert(!(*_ifg->_yanked)[neighbor], "Cannot move to lo degree twice");
          // Pull from hi-degree list
          uint prev = n->_prev;
          uint next = n->_next;
          if( prev ) lrgs(prev)._next = next;
          else _hi_degree = next;
          lrgs(next)._prev = prev;
          n->_next = _lo_degree;
          _lo_degree = neighbor;
        }
      }
    } // End of while lo-degree/lo_stk_degree worklist not empty

    // Check for got everything: is hi-degree list empty?
    if( !_hi_degree ) break;

    // Time to pick a potential spill guy
    uint lo_score = _hi_degree;
    double score = lrgs(lo_score).score();
    double area = lrgs(lo_score)._area;
    double cost = lrgs(lo_score)._cost;
    bool bound = lrgs(lo_score)._is_bound;

    // Find cheapest guy
    debug_only( int lo_no_simplify=0; );
    for( uint i = _hi_degree; i; i = lrgs(i)._next ) {
      assert( !(*_ifg->_yanked)[i], "" );
      // It's just vaguely possible to move hi-degree to lo-degree without
      // going through a just-lo-degree stage: If you remove a double from
      // a float live range its degree will drop by 2 and you can skip the
      // just-lo-degree stage. It's very rare (shows up after 5000+ methods
      // in -Xcomp of Java2Demo). So just choose this guy to simplify next.
      if( lrgs(i).lo_degree() ) {
        lo_score = i;
        break;
      }
      debug_only( if( lrgs(i)._was_lo ) lo_no_simplify=i; );
      double iscore = lrgs(i).score();
      double iarea = lrgs(i)._area;
      double icost = lrgs(i)._cost;
      bool ibound = lrgs(i)._is_bound;

      // Compare cost/area of i vs cost/area of lo_score. Smaller cost/area wins.
      // Ties happen because all live ranges in question have spilled
      // a few times before and the spill-score adds a huge number which
      // washes out the low order bits. We are choosing the lesser of 2
      // evils; in this case pick largest area to spill.
      // Ties also happen when live ranges are defined and used only inside
      // one block. In which case their area is 0 and score set to max.
      // In such case choose bound live range over unbound to free registers
      // or with smaller cost to spill.
      if( iscore < score ||
          (iscore == score && iarea > area && lrgs(lo_score)._was_spilled2) ||
          (iscore == score && iarea == area &&
           ( (ibound && !bound) || (ibound == bound && icost < cost) )) ) {
        lo_score = i;
        score = iscore;
        area = iarea;
        cost = icost;
        bound = ibound;
      }
    }
    LRG *lo_lrg = &lrgs(lo_score);
    // The live range we choose for spilling is either hi-degree, or very
    // rarely it can be low-degree. If we choose a hi-degree live range
    // there better not be any lo-degree choices.
    assert( lo_lrg->lo_degree() || !lo_no_simplify, "Live range was lo-degree before coalesce; should simplify" );

    // Pull from hi-degree list
    uint prev = lo_lrg->_prev;
    uint next = lo_lrg->_next;
    if( prev ) lrgs(prev)._next = next;
    else _hi_degree = next;
    lrgs(next)._prev = prev;
    // Jam him on the lo-degree list, despite his high degree.
    // Maybe he'll get a color, and maybe he'll spill.
    // Only Select() will know.
    lrgs(lo_score)._at_risk = true;
    _lo_degree = lo_score;
    lo_lrg->_next = 0;

  } // End of while not simplified everything

}

// Is 'reg' register legal for 'lrg'?
static bool is_legal_reg(LRG &lrg, OptoReg::Name reg, int chunk) {
  if (reg >= chunk && reg < (chunk + RegMask::CHUNK_SIZE) &&
      lrg.mask().Member(OptoReg::add(reg,-chunk))) {
    // RA uses OptoReg which represent the highest element of a registers set.
    // For example, vectorX (128bit) on x86 uses [XMM,XMMb,XMMc,XMMd] set
    // in which XMMd is used by RA to represent such vectors. A double value
    // uses [XMM,XMMb] pairs and XMMb is used by RA for it.
    // The register mask uses largest bits set of overlapping register sets.
    // On x86 with AVX it uses 8 bits for each XMM registers set.
    //
    // The 'lrg' already has cleared-to-set register mask (done in Select()
    // before calling choose_color()). Passing mask.Member(reg) check above
    // indicates that the size (num_regs) of 'reg' set is less or equal to
    // 'lrg' set size.
    // For set size 1 any register which is member of 'lrg' mask is legal.
    if (lrg.num_regs()==1)
      return true;
    // For larger sets only an aligned register with the same set size is legal.
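    // Example (illustrative): for num_regs() == 4, mask == 3 and only a reg
    // whose low two bits are both set -- the highest slot of an aligned
    // 4-slot set -- passes the (reg & mask) == mask test below.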
    int mask = lrg.num_regs()-1;
    if ((reg&mask) == mask)
      return true;
  }
  return false;
}

// Choose a color using the biasing heuristic
OptoReg::Name PhaseChaitin::bias_color( LRG &lrg, int chunk ) {

  // Check for "at_risk" LRG's
  uint risk_lrg = _lrg_map.find(lrg._risk_bias);
  if( risk_lrg != 0 ) {
    // Walk the colored neighbors of the "at_risk" candidate
    // Choose a color which is both legal and already taken by a neighbor
    // of the "at_risk" candidate in order to improve the chances of the
    // "at_risk" candidate of coloring
    IndexSetIterator elements(_ifg->neighbors(risk_lrg));
    uint datum;
    while ((datum = elements.next()) != 0) {
      OptoReg::Name reg = lrgs(datum).reg();
      // If this LRG's register is legal for us, choose it
      if (is_legal_reg(lrg, reg, chunk))
        return reg;
    }
  }

  uint copy_lrg = _lrg_map.find(lrg._copy_bias);
  if( copy_lrg != 0 ) {
    // If he has a color,
    if( !(*(_ifg->_yanked))[copy_lrg] ) {
      OptoReg::Name reg = lrgs(copy_lrg).reg();
      // And it is legal for you,
      if (is_legal_reg(lrg, reg, chunk))
        return reg;
    } else if( chunk == 0 ) {
      // Choose a color which is legal for him
      RegMask tempmask = lrg.mask();
      tempmask.AND(lrgs(copy_lrg).mask());
      tempmask.clear_to_sets(lrg.num_regs());
      OptoReg::Name reg = tempmask.find_first_set(lrg.num_regs());
      if (OptoReg::is_valid(reg))
        return reg;
    }
  }

  // If no bias info exists, just go with the register selection ordering
  if (lrg._is_vector || lrg.num_regs() == 2) {
    // Find an aligned set
    return OptoReg::add(lrg.mask().find_first_set(lrg.num_regs()),chunk);
  }

  // CNC - Fun hack. Alternate 1st and 2nd selection. Enables post-allocate
  // copy removal to remove many more copies, by preventing a just-assigned
  // register from being repeatedly assigned.
  OptoReg::Name reg = lrg.mask().find_first_elem();
  if( (++_alternate & 1) && OptoReg::is_valid(reg) ) {
    // This 'Remove; find; Insert' idiom is an expensive way to find the
    // SECOND element in the mask.
    lrg.Remove(reg);
    OptoReg::Name reg2 = lrg.mask().find_first_elem();
    lrg.Insert(reg);
    if( OptoReg::is_reg(reg2))
      reg = reg2;
  }
  return OptoReg::add( reg, chunk );
}

// Choose a color in the current chunk
OptoReg::Name PhaseChaitin::choose_color( LRG &lrg, int chunk ) {
  assert( C->in_preserve_stack_slots() == 0 || chunk != 0 || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP-1)), "must not allocate stack0 (inside preserve area)");
  assert(C->out_preserve_stack_slots() == 0 || chunk != 0 || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP+0)), "must not allocate stack0 (inside preserve area)");

  if( lrg.num_regs() == 1 ||    // Common Case
      !lrg._fat_proj )          // Aligned+adjacent pairs ok
    // Use a heuristic to "bias" the color choice
    return bias_color(lrg, chunk);

  assert(!lrg._is_vector, "should be not vector here" );
  assert( lrg.num_regs() >= 2, "dead live ranges do not color" );

  // Fat-proj case or misaligned double argument.
  assert(lrg.compute_mask_size() == lrg.num_regs() ||
         lrg.num_regs() == 2, "fat projs exactly color" );
  assert( !chunk, "always color in 1st chunk" );
  // Return the highest element in the set.
  return lrg.mask().find_last_elem();
}

// Select colors by re-inserting LRGs back into the IFG. LRGs are re-inserted
// in reverse order of removal. As long as nothing of hi-degree was yanked,
// everything going back is guaranteed a color. Select that color. If some
// hi-degree LRG cannot get a color then we record that we must spill.
uint PhaseChaitin::Select( ) {
  uint spill_reg = LRG::SPILL_REG;
  _max_reg = OptoReg::Name(0);  // Past max register used
  while( _simplified ) {
    // Pull next LRG from the simplified list - in reverse order of removal
    uint lidx = _simplified;
    LRG *lrg = &lrgs(lidx);
    _simplified = lrg->_next;


#ifndef PRODUCT
    if (trace_spilling()) {
      ttyLocker ttyl;
      tty->print_cr("L%d selecting degree %d degrees_of_freedom %d", lidx, lrg->degree(),
                    lrg->degrees_of_freedom());
      lrg->dump();
    }
#endif

    // Re-insert into the IFG
    _ifg->re_insert(lidx);
    if( !lrg->alive() ) continue;
    // capture allstackedness flag before mask is hacked
    const int is_allstack = lrg->mask().is_AllStack();

    // Yeah, yeah, yeah, I know, I know. I can refactor this
    // to avoid the GOTO, although the refactored code will not
    // be much clearer. We arrive here IFF we have a stack-based
    // live range that cannot color in the current chunk, and it
    // has to move into the next free stack chunk.
    int chunk = 0;              // Current chunk is first chunk
    retry_next_chunk:

    // Remove neighbor colors
    IndexSet *s = _ifg->neighbors(lidx);

    debug_only(RegMask orig_mask = lrg->mask();)
    IndexSetIterator elements(s);
    uint neighbor;
    while ((neighbor = elements.next()) != 0) {
      // Note that neighbor might be a spill_reg. In this case, exclusion
      // of its color will be a no-op, since the spill_reg chunk is in outer
      // space. Also, if neighbor is in a different chunk, this exclusion
      // will be a no-op. (Later on, if lrg runs out of possible colors in
      // its chunk, a new chunk of color may be tried, in which case
      // examination of neighbors is started again, at retry_next_chunk.)
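      // Chunk arithmetic (sketch): colors live in windows of
      // RegMask::CHUNK_SIZE. Chunk 0 covers the machine registers plus the
      // first stack slots; each rollover below shifts the window up by
      // CHUNK_SIZE to expose a fresh set of stack-slot colors.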
      LRG &nlrg = lrgs(neighbor);
      OptoReg::Name nreg = nlrg.reg();
      // Only subtract masks in the same chunk
      if( nreg >= chunk && nreg < chunk + RegMask::CHUNK_SIZE ) {
#ifndef PRODUCT
        uint size = lrg->mask().Size();
        RegMask rm = lrg->mask();
#endif
        lrg->SUBTRACT(nlrg.mask());
#ifndef PRODUCT
        if (trace_spilling() && lrg->mask().Size() != size) {
          ttyLocker ttyl;
          tty->print("L%d ", lidx);
          rm.dump();
          tty->print(" intersected L%d ", neighbor);
          nlrg.mask().dump();
          tty->print(" removed ");
          rm.SUBTRACT(lrg->mask());
          rm.dump();
          tty->print(" leaving ");
          lrg->mask().dump();
          tty->cr();
        }
#endif
      }
    }
    //assert(is_allstack == lrg->mask().is_AllStack(), "nbrs must not change AllStackedness");
    // Aligned pairs need aligned masks
    assert(!lrg->_is_vector || !lrg->_fat_proj, "sanity");
    if (lrg->num_regs() > 1 && !lrg->_fat_proj) {
      lrg->clear_to_sets();
    }

    // Check if a color is available and if so pick the color
    OptoReg::Name reg = choose_color( *lrg, chunk );
#ifdef SPARC
    debug_only(lrg->compute_set_mask_size());
    assert(lrg->num_regs() < 2 || lrg->is_bound() || is_even(reg-1), "allocate all doubles aligned");
#endif

    //---------------
    // If we fail to color and the AllStack flag is set, trigger
    // a chunk-rollover event
    if(!OptoReg::is_valid(OptoReg::add(reg,-chunk)) && is_allstack) {
      // Bump register mask up to next stack chunk
      chunk += RegMask::CHUNK_SIZE;
      lrg->Set_All();

      goto retry_next_chunk;
    }

    //---------------
    // Did we get a color?
    else if( OptoReg::is_valid(reg)) {
#ifndef PRODUCT
      RegMask avail_rm = lrg->mask();
#endif

      // Record selected register
      lrg->set_reg(reg);

      if( reg >= _max_reg )     // Compute max register limit
        _max_reg = OptoReg::add(reg,1);
      // Fold reg back into normal space
      reg = OptoReg::add(reg,-chunk);

      // If the live range is not bound, then we actually had some choices
      // to make. In this case, the mask has more bits in it than the colors
      // chosen. Restrict the mask to just what was picked.
      int n_regs = lrg->num_regs();
      assert(!lrg->_is_vector || !lrg->_fat_proj, "sanity");
      if (n_regs == 1 || !lrg->_fat_proj) {
        assert(!lrg->_is_vector || n_regs <= RegMask::SlotsPerVecY, "sanity");
        lrg->Clear();           // Clear the mask
        lrg->Insert(reg);       // Set regmask to match selected reg
        // For vectors and pairs, also insert the low bit of the pair
        for (int i = 1; i < n_regs; i++)
          lrg->Insert(OptoReg::add(reg,-i));
        lrg->set_mask_size(n_regs);
      } else {                  // Else fatproj
        // mask must be equal to fatproj bits, by definition
      }
#ifndef PRODUCT
      if (trace_spilling()) {
        ttyLocker ttyl;
        tty->print("L%d selected ", lidx);
        lrg->mask().dump();
        tty->print(" from ");
        avail_rm.dump();
        tty->cr();
      }
#endif
      // Note that reg is the highest-numbered register in the newly-bound mask.
    } // end color available case

    //---------------
    // Live range is live and no colors available
    else {
      assert( lrg->alive(), "" );
      assert( !lrg->_fat_proj || lrg->is_multidef() ||
              lrg->_def->outcnt() > 0, "fat_proj cannot spill");
      assert( !orig_mask.is_AllStack(), "All Stack does not spill" );

      // Assign the special spillreg register
      lrg->set_reg(OptoReg::Name(spill_reg++));
      // Do not empty the regmask; leave mask_size lying around
      // for use during Spilling
#ifndef PRODUCT
      if( trace_spilling() ) {
        ttyLocker ttyl;
        tty->print("L%d spilling with neighbors: ", lidx);
        s->dump();
        debug_only(tty->print(" original mask: "));
        debug_only(orig_mask.dump());
        dump_lrg(lidx);
      }
#endif
    } // end spill case

  }

  return spill_reg-LRG::SPILL_REG; // Return number of spills
}

// Copy 'was_spilled'-edness from the source Node to the dst Node.
void PhaseChaitin::copy_was_spilled( Node *src, Node *dst ) {
  if( _spilled_once.test(src->_idx) ) {
    _spilled_once.set(dst->_idx);
    lrgs(_lrg_map.find(dst))._was_spilled1 = 1;
    if( _spilled_twice.test(src->_idx) ) {
      _spilled_twice.set(dst->_idx);
      lrgs(_lrg_map.find(dst))._was_spilled2 = 1;
    }
  }
}

// Set the 'spilled_once' or 'spilled_twice' flag on a node.
void PhaseChaitin::set_was_spilled( Node *n ) {
  if( _spilled_once.test_set(n->_idx) )
    _spilled_twice.set(n->_idx);
}

// Convert Ideal spill instructions into proper FramePtr + offset Loads and
// Stores. Use-def chains are NOT preserved, but Node->LRG->reg maps are.
void PhaseChaitin::fixup_spills() {
  // This function does only cisc spill work.
  if( !UseCISCSpill ) return;

  NOT_PRODUCT( Compile::TracePhase t3("fixupSpills", &_t_fixupSpills, TimeCompiler); )

  // Grab the Frame Pointer
  Node *fp = _cfg.get_root_block()->head()->in(1)->in(TypeFunc::FramePtr);

  // For all blocks
  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* block = _cfg.get_block(i);

    // For all instructions in block
    uint last_inst = block->end_idx();
    for (uint j = 1; j <= last_inst; j++) {
      Node* n = block->get_node(j);

      // Dead instruction???
      assert( n->outcnt() != 0 || // Nothing dead after post alloc
              C->top() == n ||    // Or the random TOP node
              n->is_Proj(),       // Or a fat-proj kill node
              "No dead instructions after post-alloc" );

      int inp = n->cisc_operand();
      if( inp != AdlcVMDeps::Not_cisc_spillable ) {
        // Convert operand number to edge index number
        MachNode *mach = n->as_Mach();
        inp = mach->operand_index(inp);
        Node *src = n->in(inp); // Value to load or store
        LRG &lrg_cisc = lrgs(_lrg_map.find_const(src));
        OptoReg::Name src_reg = lrg_cisc.reg();
        // Doubles record the HIGH register of an adjacent pair.
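        // Example (illustrative, register names hypothetical): a double
        // colored to the pair {R10,R11} has reg() == R11; adding
        // 1 - num_regs() == -1 below recovers the low register R10, whose
        // slot determines the stack offset when the value lives on the stack.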
        src_reg = OptoReg::add(src_reg,1-lrg_cisc.num_regs());
        if( OptoReg::is_stack(src_reg) ) { // If input is on stack
          // This is a CISC Spill, get stack offset and construct new node
#ifndef PRODUCT
          if( TraceCISCSpill ) {
            tty->print("    reg-instr:  ");
            n->dump();
          }
#endif
          int stk_offset = reg2offset(src_reg);
          // Bailout if we might exceed node limit when spilling this instruction
          C->check_node_count(0, "out of nodes fixing spills");
          if (C->failing())  return;
          // Transform node
          MachNode *cisc = mach->cisc_version(stk_offset, C)->as_Mach();
          cisc->set_req(inp,fp);          // Base register is frame pointer
          if( cisc->oper_input_base() > 1 && mach->oper_input_base() <= 1 ) {
            assert( cisc->oper_input_base() == 2, "Only adding one edge");
            cisc->ins_req(1,src);         // Requires a memory edge
          }
          block->map_node(cisc, j);       // Insert into basic block
          n->subsume_by(cisc, C);         // Correct graph
          ++_used_cisc_instructions;
#ifndef PRODUCT
          if( TraceCISCSpill ) {
            tty->print("    cisc-instr: ");
            cisc->dump();
          }
#endif
        } else {
#ifndef PRODUCT
          if( TraceCISCSpill ) {
            tty->print("    using reg-instr: ");
            n->dump();
          }
#endif
          ++_unused_cisc_instructions;    // input can be on stack
        }
      }

    } // End of for all instructions

  } // End of for all blocks
}

// Helper to stretch above; recursively discover the base Node for a
// given derived Node.  Easy for AddP-related machine nodes, but needs
// to be recursive for derived Phis.
Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derived, uint &maxlrg ) {
  // See if already computed; if so return it
  if( derived_base_map[derived->_idx] )
    return derived_base_map[derived->_idx];

  // See if this happens to be a base.
  // NOTE: we use TypePtr instead of TypeOopPtr because we can have
  // pointers derived from NULL!  These are always along paths that
  // can't happen at run-time but the optimizer cannot deduce it so
  // we have to handle it gracefully.
  assert(!derived->bottom_type()->isa_narrowoop() ||
         derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
  const TypePtr *tj = derived->bottom_type()->isa_ptr();
  // If it's an OOP with a non-zero offset, then it is derived.
  if( tj == NULL || tj->_offset == 0 ) {
    derived_base_map[derived->_idx] = derived;
    return derived;
  }
  // Derived is NULL+offset?  Base is NULL!
  if( derived->is_Con() ) {
    Node *base = _matcher.mach_null();
    assert(base != NULL, "sanity");
    if (base->in(0) == NULL) {
      // Initialize it once and make it shared:
      // set control to _root and place it into Start block
      // (where top() node is placed).
      base->init_req(0, _cfg.get_root_node());
      Block *startb = _cfg.get_block_for_node(C->top());
      uint node_pos = startb->find_node(C->top());
      startb->insert_node(base, node_pos);
      _cfg.map_node_to_block(base, startb);
      assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");

      // The loadConP0 might have projection nodes depending on architecture
      // Add the projection nodes to the CFG
      for (DUIterator_Fast imax, i = base->fast_outs(imax); i < imax; i++) {
        Node* use = base->fast_out(i);
        if (use->is_MachProj()) {
          startb->insert_node(use, ++node_pos);
          _cfg.map_node_to_block(use, startb);
          new_lrg(use, maxlrg++);
        }
      }
    }
    if (_lrg_map.live_range_id(base) == 0) {
      new_lrg(base, maxlrg++);
    }
    assert(base->in(0) == _cfg.get_root_node() &&
           _cfg.get_block_for_node(base) == _cfg.get_block_for_node(C->top()),
           "base NULL should be shared");
    derived_base_map[derived->_idx] = base;
    return base;
  }

  // Check for AddP-related opcodes
  if (!derived->is_Phi()) {
    assert(derived->as_Mach()->ideal_Opcode() == Op_AddP, err_msg_res("but is: %s", derived->Name()));
    Node *base = derived->in(AddPNode::Base);
    derived_base_map[derived->_idx] = base;
    return base;
  }

  // Recursively find bases for Phis.
  // First check to see if we can avoid a base Phi here.
  Node *base = find_base_for_derived( derived_base_map, derived->in(1), maxlrg);
  uint i;
  for( i = 2; i < derived->req(); i++ )
    if( base != find_base_for_derived( derived_base_map, derived->in(i), maxlrg))
      break;
  // Went to the end without finding any different bases?
  if( i == derived->req() ) {   // No need for a base Phi here
    derived_base_map[derived->_idx] = base;
    return base;
  }

  // Now we see we need a base-Phi here to merge the bases
  const Type *t = base->bottom_type();
  base = new (C) PhiNode( derived->in(0), t );
  for( i = 1; i < derived->req(); i++ ) {
    base->init_req(i, find_base_for_derived(derived_base_map, derived->in(i), maxlrg));
    t = t->meet(base->in(i)->bottom_type());
  }
  base->as_Phi()->set_type(t);

  // Search the current block for an existing base-Phi
  Block *b = _cfg.get_block_for_node(derived);
  for( i = 1; i <= b->end_idx(); i++ ) { // Search for matching Phi
    Node *phi = b->get_node(i);
    if( !phi->is_Phi() ) {      // Found end of Phis with no match?
      b->insert_node(base, i);  // Must insert created Phi here as base
      _cfg.map_node_to_block(base, b);
      new_lrg(base, maxlrg++);
      break;
    }
    // See if Phi matches.
    uint j;
    for( j = 1; j < base->req(); j++ )
      if( phi->in(j) != base->in(j) &&
          !(phi->in(j)->is_Con() && base->in(j)->is_Con()) ) // allow different NULLs
        break;
    if( j == base->req() ) {    // All inputs match?
      base = phi;               // Then use existing 'phi' and drop 'base'
      break;
    }
  }

  // Cache info for later passes
  derived_base_map[derived->_idx] = base;
  return base;
}
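
// A minimal standalone sketch (disabled below; hypothetical 'Val' type, not
// the HotSpot API) of the memoized recursion in find_base_for_derived above:
// a value either is its own base, names its base directly (the AddP case),
// or must merge the bases of all its inputs (the Phi case).  Construction of
// the merging base-Phi is elided; derived_base_map plays the role of 'memo'.
#if 0
#include <cstddef>
#include <utility>
#include <vector>

struct Val {                        // hypothetical stand-in for a Node
  Val*              base;           // non-NULL: AddP-like, base known directly
  std::vector<Val*> inputs;         // non-empty: Phi-like, merge input bases
};

static Val* find_base(Val* v, std::vector<std::pair<Val*, Val*> >& memo) {
  for (size_t i = 0; i < memo.size(); i++)   // already computed?
    if (memo[i].first == v) return memo[i].second;
  Val* b = v;                                // default: v is its own base
  if (v->base != NULL) {                     // AddP case
    b = v->base;
  } else if (!v->inputs.empty()) {           // Phi case
    b = find_base(v->inputs[0], memo);
    for (size_t i = 1; i < v->inputs.size(); i++) {
      if (find_base(v->inputs[i], memo) != b) {
        b = NULL;                            // bases disagree: a base-Phi
        break;                               // would be built here (elided)
      }
    }
  }
  memo.push_back(std::make_pair(v, b));      // cache for later queries
  return b;
}
#endif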

// At each Safepoint, insert extra debug edges for each pair of derived value/
// base pointer that is live across the Safepoint for oopmap building.  The
// edge pairs get added in after sfpt->jvmtail()->oopoff(), but are in the
// required edge set.
bool PhaseChaitin::stretch_base_pointer_live_ranges(ResourceArea *a) {
  int must_recompute_live = false;
  uint maxlrg = _lrg_map.max_lrg_id();
  Node **derived_base_map = (Node**)a->Amalloc(sizeof(Node*)*C->unique());
  memset( derived_base_map, 0, sizeof(Node*)*C->unique() );

  // For all blocks in RPO do...
  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* block = _cfg.get_block(i);
    // Note use of deep-copy constructor.  I cannot hammer the original
    // liveout bits, because they are needed by the following coalesce pass.
    IndexSet liveout(_live->live(block));

    for (uint j = block->end_idx() + 1; j > 1; j--) {
      Node* n = block->get_node(j - 1);

      // Pre-split compares of loop-phis.  Loop-phis form a cycle we would
      // like to see in the same register.  Compare uses the loop-phi and so
      // extends its live range BUT cannot be part of the cycle.  If this
      // extended live range overlaps with the update of the loop-phi value
      // we need both alive at the same time -- which requires at least 1
      // copy.  But because Intel has only 2-address registers we end up with
      // at least 2 copies, one before the loop-phi update instruction and
      // one after.  Instead we split the input to the compare just after the
      // phi.
      if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CmpI ) {
        Node *phi = n->in(1);
        if( phi->is_Phi() && phi->as_Phi()->region()->is_Loop() ) {
          Block *phi_block = _cfg.get_block_for_node(phi);
          if (_cfg.get_block_for_node(phi_block->pred(2)) == block) {
            const RegMask *mask = C->matcher()->idealreg2spillmask[Op_RegI];
            Node *spill = new (C) MachSpillCopyNode( phi, *mask, *mask );
            insert_proj( phi_block, 1, spill, maxlrg++ );
            n->set_req(1, spill);
            must_recompute_live = true;
          }
        }
      }

      // Get value being defined
      uint lidx = _lrg_map.live_range_id(n);
      // Ignore the occasional brand-new live range
      if (lidx && lidx < _lrg_map.max_lrg_id()) {
        // Remove from live-out set
        liveout.remove(lidx);

        // Copies do not define a new value and so do not interfere.
        // Remove the copy's source from the liveout set before interfering.
        uint idx = n->is_Copy();
        if (idx) {
          liveout.remove(_lrg_map.live_range_id(n->in(idx)));
        }
      }

      // Found a safepoint?
      JVMState *jvms = n->jvms();
      if( jvms ) {
        // Now scan for a live derived pointer
        IndexSetIterator elements(&liveout);
        uint neighbor;
        while ((neighbor = elements.next()) != 0) {
          // Find reaching DEF for base and derived values
          // This works because we are still in SSA during this call.
          Node *derived = lrgs(neighbor)._def;
          const TypePtr *tj = derived->bottom_type()->isa_ptr();
          assert(!derived->bottom_type()->isa_narrowoop() ||
                 derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
          // If it's an OOP with a non-zero offset, then it is derived.
          if( tj && tj->_offset != 0 && tj->isa_oop_ptr() ) {
            Node *base = find_base_for_derived(derived_base_map, derived, maxlrg);
            assert(base->_idx < _lrg_map.size(), "");
            // Add reaching DEFs of derived pointer and base pointer as a
            // pair of inputs
            n->add_req(derived);
            n->add_req(base);

            // See if the base pointer is already live to this point.
            // Since I'm working on the SSA form, live-ness amounts to
            // reaching def's.  So if I find the base's live range then
            // I know the base's def reaches here.
            if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
                 !liveout.member(_lrg_map.live_range_id(base))) &&        // not live) AND
                (_lrg_map.live_range_id(base) > 0) &&                     // not a constant
                _cfg.get_block_for_node(base) != block) {                 // base not def'd in blk)
              // Base pointer is not currently live.  Since I stretched
              // the base pointer to here and it crosses basic-block
              // boundaries, the global live info is now incorrect.
              // Recompute live.
              must_recompute_live = true;
            } // End of if base pointer is not live to debug info
          }
        } // End of scan all live data for derived ptrs crossing GC point
      } // End of if found a GC point

      // Make all inputs live
      if (!n->is_Phi()) {       // Phi function uses come from prior block
        for (uint k = 1; k < n->req(); k++) {
          uint lidx = _lrg_map.live_range_id(n->in(k));
          if (lidx < _lrg_map.max_lrg_id()) {
            liveout.insert(lidx);
          }
        }
      }

    } // End of forall instructions in block
    liveout.clear();  // Free the memory used by liveout.

  } // End of forall blocks
  _lrg_map.set_max_lrg_id(maxlrg);

  // If I created a new live range I need to recompute live
  if (maxlrg != _ifg->_maxlrg) {
    must_recompute_live = true;
  }

  return must_recompute_live != 0;
}

// Extend the node to LRG mapping
void PhaseChaitin::add_reference(const Node *node, const Node *old_node) {
  _lrg_map.extend(node->_idx, _lrg_map.live_range_id(old_node));
}

#ifndef PRODUCT
void PhaseChaitin::dump(const Node *n) const {
  uint r = (n->_idx < _lrg_map.size()) ? _lrg_map.find_const(n) : 0;
  tty->print("L%d",r);
  if (r && n->Opcode() != Op_Phi) {
    if( _node_regs ) {          // Got a post-allocation copy of allocation?
      tty->print("[");
      OptoReg::Name second = get_reg_second(n);
      if( OptoReg::is_valid(second) ) {
        if( OptoReg::is_reg(second) )
          tty->print("%s:",Matcher::regName[second]);
        else
          tty->print("%s+%d:",OptoReg::regname(OptoReg::c_frame_pointer), reg2offset_unchecked(second));
      }
      OptoReg::Name first = get_reg_first(n);
      if( OptoReg::is_reg(first) )
        tty->print("%s]",Matcher::regName[first]);
      else
        tty->print("%s+%d]",OptoReg::regname(OptoReg::c_frame_pointer), reg2offset_unchecked(first));
    } else
      n->out_RegMask().dump();
  }
  tty->print("/N%d\t",n->_idx);
  tty->print("%s === ", n->Name());
  uint k;
  for (k = 0; k < n->req(); k++) {
    Node *m = n->in(k);
    if (!m) {
      tty->print("_ ");
    }
    else {
      uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0;
      tty->print("L%d",r);
      // Data MultiNodes can have projections with no real registers.
      // Don't die while dumping them.
      int op = n->Opcode();
      if( r && op != Op_Phi && op != Op_Proj && op != Op_SCMemProj) {
        if( _node_regs ) {
          tty->print("[");
          OptoReg::Name second = get_reg_second(n->in(k));
          if( OptoReg::is_valid(second) ) {
            if( OptoReg::is_reg(second) )
              tty->print("%s:",Matcher::regName[second]);
            else
              tty->print("%s+%d:",OptoReg::regname(OptoReg::c_frame_pointer),
                         reg2offset_unchecked(second));
          }
          OptoReg::Name first = get_reg_first(n->in(k));
          if( OptoReg::is_reg(first) )
            tty->print("%s]",Matcher::regName[first]);
          else
            tty->print("%s+%d]",OptoReg::regname(OptoReg::c_frame_pointer),
                       reg2offset_unchecked(first));
        } else
          n->in_RegMask(k).dump();
      }
      tty->print("/N%d ",m->_idx);
    }
  }
  if( k < n->len() && n->in(k) ) tty->print("| ");
  for( ; k < n->len(); k++ ) {
    Node *m = n->in(k);
    if(!m) {
      break;
    }
    uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0;
    tty->print("L%d",r);
    tty->print("/N%d ",m->_idx);
  }
  if( n->is_Mach() ) n->as_Mach()->dump_spec(tty);
  else n->dump_spec(tty);
  if( _spilled_once.test(n->_idx) ) {
    tty->print(" Spill_1");
    if( _spilled_twice.test(n->_idx) )
      tty->print(" Spill_2");
  }
  tty->print("\n");
}

void PhaseChaitin::dump(const Block *b) const {
  b->dump_head(&_cfg);

  // For all instructions
  for( uint j = 0; j < b->number_of_nodes(); j++ )
    dump(b->get_node(j));
  // Print live-out info at end of block
  if( _live ) {
    tty->print("Liveout: ");
    IndexSet *live = _live->live(b);
    IndexSetIterator elements(live);
    tty->print("{");
    uint i;
    while ((i = elements.next()) != 0) {
      tty->print("L%d ", _lrg_map.find_const(i));
    }
    tty->print_cr("}");
  }
  tty->print("\n");
}

void PhaseChaitin::dump() const {
  tty->print( "--- Chaitin -- argsize: %d  framesize: %d ---\n",
              _matcher._new_SP, _framesize );

  // For all blocks
  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    dump(_cfg.get_block(i));
  }
  // End of per-block dump
  tty->print("\n");

  if (!_ifg) {
    tty->print("(No IFG.)\n");
    return;
  }

  // Dump LRG array
  tty->print("--- Live RanGe Array ---\n");
  for (uint i2 = 1; i2 < _lrg_map.max_lrg_id(); i2++) {
    tty->print("L%d: ",i2);
    if (i2 < _ifg->_maxlrg) {
      lrgs(i2).dump();
    }
    else {
      tty->print_cr("new LRG");
    }
  }
  tty->cr();

  // Dump lo-degree list
  tty->print("Lo degree: ");
  for(uint i3 = _lo_degree; i3; i3 = lrgs(i3)._next )
    tty->print("L%d ",i3);
  tty->cr();

  // Dump lo-stk-degree list
  tty->print("Lo stk degree: ");
  for(uint i4 = _lo_stk_degree; i4; i4 = lrgs(i4)._next )
    tty->print("L%d ",i4);
  tty->cr();

  // Dump hi-degree list
  tty->print("Hi degree: ");
  for(uint i5 = _hi_degree; i5; i5 = lrgs(i5)._next )
    tty->print("L%d ",i5);
  tty->cr();
}

void PhaseChaitin::dump_degree_lists() const {
  // Dump lo-degree list
  tty->print("Lo degree: ");
  for( uint i = _lo_degree; i; i = lrgs(i)._next )
    tty->print("L%d ",i);
  tty->cr();

  // Dump lo-stk-degree list
  tty->print("Lo stk degree: ");
  for(uint i2 = _lo_stk_degree; i2; i2 = lrgs(i2)._next )
    tty->print("L%d ",i2);
  tty->cr();

  // Dump hi-degree list
  tty->print("Hi degree: ");
  for(uint i3 = _hi_degree; i3; i3 = lrgs(i3)._next )
    tty->print("L%d ",i3);
  tty->cr();
}

void PhaseChaitin::dump_simplified() const {
  tty->print("Simplified: ");
  for( uint i = _simplified; i; i = lrgs(i)._next )
    tty->print("L%d ",i);
  tty->cr();
}

static char *print_reg( OptoReg::Name reg, const PhaseChaitin *pc, char *buf ) {
  if ((int)reg < 0)
    sprintf(buf, "<OptoReg::%d>", (int)reg);
  else if (OptoReg::is_reg(reg))
    strcpy(buf, Matcher::regName[reg]);
  else
    sprintf(buf,"%s + #%d",OptoReg::regname(OptoReg::c_frame_pointer),
            pc->reg2offset(reg));
  return buf+strlen(buf);
}

// Dump a register name into a buffer.  Be intelligent if we get called
// before allocation is complete.
char *PhaseChaitin::dump_register( const Node *n, char *buf ) const {
  if( !this ) {                 // Not got anything?
    // Note: comparing 'this' against NULL is formally undefined behavior;
    // the test tolerates dump paths that may run before an allocator exists.
    sprintf(buf,"N%d",n->_idx); // Then use Node index
  } else if( _node_regs ) {
    // Post allocation, use direct mappings, no LRG info available
    print_reg( get_reg_first(n), this, buf );
  } else {
    uint lidx = _lrg_map.find_const(n); // Grab LRG number
    if( !_ifg ) {
      sprintf(buf,"L%d",lidx);  // No register binding yet
    } else if( !lidx ) {        // Special, not allocated value
      strcpy(buf,"Special");
    } else {
      if (lrgs(lidx)._is_vector) {
        if (lrgs(lidx).mask().is_bound_set(lrgs(lidx).num_regs()))
          print_reg( lrgs(lidx).reg(), this, buf ); // a bound machine register
        else
          sprintf(buf,"L%d",lidx); // No register binding yet
      } else if( (lrgs(lidx).num_regs() == 1)
                 ? lrgs(lidx).mask().is_bound1()
                 : lrgs(lidx).mask().is_bound_pair() ) {
        // Hah!  We have a bound machine register
        print_reg( lrgs(lidx).reg(), this, buf );
      } else {
        sprintf(buf,"L%d",lidx); // No register binding yet
      }
    }
  }
  return buf+strlen(buf);
}

void PhaseChaitin::dump_for_spill_split_recycle() const {
  if( WizardMode && (PrintCompilation || PrintOpto) ) {
    // Display which live ranges need to be split and the allocator's state
    tty->print_cr("Graph-Coloring Iteration %d will split the following live ranges", _trip_cnt);
    for (uint bidx = 1; bidx < _lrg_map.max_lrg_id(); bidx++) {
      if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) {
        tty->print("L%d: ", bidx);
        lrgs(bidx).dump();
      }
    }
    tty->cr();
    dump();
  }
}

void PhaseChaitin::dump_frame() const {
  const char *fp = OptoReg::regname(OptoReg::c_frame_pointer);
  const TypeTuple *domain = C->tf()->domain();
  const int argcnt = domain->cnt() - TypeFunc::Parms;

  // Incoming arguments in registers dump
  for( int k = 0; k < argcnt; k++ ) {
    OptoReg::Name parmreg = _matcher._parm_regs[k].first();
    if( OptoReg::is_reg(parmreg)) {
      const char *reg_name = OptoReg::regname(parmreg);
      tty->print("#r%3.3d %s", parmreg, reg_name);
      parmreg = _matcher._parm_regs[k].second();
      if( OptoReg::is_reg(parmreg)) {
        tty->print(":%s", OptoReg::regname(parmreg));
      }
      tty->print("   : parm %d: ", k);
      domain->field_at(k + TypeFunc::Parms)->dump();
      tty->cr();
    }
  }

  // Check for un-owned padding above incoming args
  OptoReg::Name reg = _matcher._new_SP;
  if( reg > _matcher._in_arg_limit ) {
    reg = OptoReg::add(reg, -1);
    tty->print_cr("#r%3.3d %s+%2d: pad0, owned by CALLER", reg, fp, reg2offset_unchecked(reg));
  }

  // Incoming argument area dump
  OptoReg::Name begin_in_arg = OptoReg::add(_matcher._old_SP, C->out_preserve_stack_slots());
  while( reg > begin_in_arg ) {
    reg = OptoReg::add(reg, -1);
    tty->print("#r%3.3d %s+%2d: ",reg,fp,reg2offset_unchecked(reg));
    int j;
    for( j = 0; j < argcnt; j++) {
      if( _matcher._parm_regs[j].first() == reg ||
          _matcher._parm_regs[j].second() == reg ) {
        tty->print("parm %d: ",j);
        domain->field_at(j + TypeFunc::Parms)->dump();
        tty->cr();
        break;
      }
    }
    if( j >= argcnt )
      tty->print_cr("HOLE, owned by SELF");
  }

  // Old outgoing preserve area
  while( reg > _matcher._old_SP ) {
    reg = OptoReg::add(reg, -1);
    tty->print_cr("#r%3.3d %s+%2d: old out preserve",reg,fp,reg2offset_unchecked(reg));
  }

  // Old SP
  tty->print_cr("# -- Old %s -- Framesize: %d --",fp,
                reg2offset_unchecked(OptoReg::add(_matcher._old_SP,-1)) - reg2offset_unchecked(_matcher._new_SP) + jintSize);

  // Preserve area dump
  int fixed_slots = C->fixed_slots();
  OptoReg::Name begin_in_preserve = OptoReg::add(_matcher._old_SP, -(int)C->in_preserve_stack_slots());
  OptoReg::Name return_addr = _matcher.return_addr();

  reg = OptoReg::add(reg, -1);
  while (OptoReg::is_stack(reg)) {
    tty->print("#r%3.3d %s+%2d: ",reg,fp,reg2offset_unchecked(reg));
    if (return_addr == reg) {
      tty->print_cr("return address");
    } else if (reg >= begin_in_preserve) {
      // Preserved slots are present on x86
      if (return_addr == OptoReg::add(reg, VMRegImpl::slots_per_word))
        tty->print_cr("saved fp register");
      else if (return_addr == OptoReg::add(reg, 2*VMRegImpl::slots_per_word) &&
               VerifyStackAtCalls)
        tty->print_cr("0xBADB100D  +VerifyStackAtCalls");
      else
        tty->print_cr("in_preserve");
    } else if ((int)OptoReg::reg2stack(reg) < fixed_slots) {
      tty->print_cr("Fixed slot %d", OptoReg::reg2stack(reg));
    } else {
      tty->print_cr("pad2, stack alignment");
    }
    reg = OptoReg::add(reg, -1);
  }

  // Spill area dump
  reg = OptoReg::add(_matcher._new_SP, _framesize );
  while( reg > _matcher._out_arg_limit ) {
    reg = OptoReg::add(reg, -1);
    tty->print_cr("#r%3.3d %s+%2d: spill",reg,fp,reg2offset_unchecked(reg));
  }

  // Outgoing argument area dump
  while( reg > OptoReg::add(_matcher._new_SP, C->out_preserve_stack_slots()) ) {
    reg = OptoReg::add(reg, -1);
    tty->print_cr("#r%3.3d %s+%2d: outgoing argument",reg,fp,reg2offset_unchecked(reg));
  }

  // Outgoing new preserve area
  while( reg > _matcher._new_SP ) {
    reg = OptoReg::add(reg, -1);
    tty->print_cr("#r%3.3d %s+%2d: new out preserve",reg,fp,reg2offset_unchecked(reg));
  }
  tty->print_cr("#");
}

void PhaseChaitin::dump_bb( uint pre_order ) const {
  tty->print_cr("---dump of B%d---", pre_order);
  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* block = _cfg.get_block(i);
    if (block->_pre_order == pre_order) {
      dump(block);
    }
  }
}

void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const {
  tty->print_cr("---dump of L%d---", lidx);

  if (_ifg) {
    if (lidx >= _lrg_map.max_lrg_id()) {
      tty->print("Attempt to print live range index beyond max live range.\n");
      return;
    }
    tty->print("L%d: ", lidx);
    if (lidx < _ifg->_maxlrg) {
      lrgs(lidx).dump();
    } else {
      tty->print_cr("new LRG");
    }
  }
  if( _ifg && lidx < _ifg->_maxlrg) {
    tty->print("Neighbors: %d - ", _ifg->neighbor_cnt(lidx));
    _ifg->neighbors(lidx)->dump();
    tty->cr();
  }
  // For all blocks
  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* block = _cfg.get_block(i);
    int dump_once = 0;

    // For all instructions
    for( uint j = 0; j < block->number_of_nodes(); j++ ) {
      Node *n = block->get_node(j);
      if (_lrg_map.find_const(n) == lidx) {
        if (!dump_once++) {
          tty->cr();
          block->dump_head(&_cfg);
        }
        dump(n);
        continue;
      }
      if (!defs_only) {
        uint cnt = n->req();
        for( uint k = 1; k < cnt; k++ ) {
          Node *m = n->in(k);
          if (!m) {
            continue; // be robust in the dumper
          }
          if (_lrg_map.find_const(m) == lidx) {
            if (!dump_once++) {
              tty->cr();
              block->dump_head(&_cfg);
            }
            dump(n);
          }
        }
      }
    }
  } // End of per-block dump
  tty->cr();
}
#endif // not PRODUCT

int    PhaseChaitin::_final_loads  = 0;
int    PhaseChaitin::_final_stores = 0;
int    PhaseChaitin::_final_memoves = 0;
int    PhaseChaitin::_final_copies = 0;
double PhaseChaitin::_final_load_cost  = 0;
double PhaseChaitin::_final_store_cost = 0;
double PhaseChaitin::_final_memove_cost = 0;
double PhaseChaitin::_final_copy_cost  = 0;
int    PhaseChaitin::_conserv_coalesce = 0;
int    PhaseChaitin::_conserv_coalesce_pair = 0;
int    PhaseChaitin::_conserv_coalesce_trie = 0;
int    PhaseChaitin::_conserv_coalesce_quad = 0;
int    PhaseChaitin::_post_alloc = 0;
int    PhaseChaitin::_lost_opp_pp_coalesce = 0;
int    PhaseChaitin::_lost_opp_cflow_coalesce = 0;
int    PhaseChaitin::_used_cisc_instructions = 0;
int    PhaseChaitin::_unused_cisc_instructions = 0;
int    PhaseChaitin::_allocator_attempts = 0;
int    PhaseChaitin::_allocator_successes = 0;

#ifndef PRODUCT
uint PhaseChaitin::_high_pressure = 0;
uint PhaseChaitin::_low_pressure = 0;

void PhaseChaitin::print_chaitin_statistics() {
  tty->print_cr("Inserted %d spill loads, %d spill stores, %d mem-mem moves and %d copies.", _final_loads, _final_stores, _final_memoves, _final_copies);
  tty->print_cr("Total load cost= %6.0f, store cost = %6.0f, mem-mem cost = %5.2f, copy cost = %5.0f.", _final_load_cost, _final_store_cost, _final_memove_cost, _final_copy_cost);
  tty->print_cr("Adjusted spill cost = %7.0f.",
                _final_load_cost*4.0 + _final_store_cost*2.0 +
                _final_copy_cost*1.0 + _final_memove_cost*12.0);
  tty->print("Conservatively coalesced %d copies, %d pairs",
             _conserv_coalesce, _conserv_coalesce_pair);
  if( _conserv_coalesce_trie || _conserv_coalesce_quad )
    tty->print(", %d tries, %d quads", _conserv_coalesce_trie, _conserv_coalesce_quad);
  tty->print_cr(", %d post alloc.", _post_alloc);
  if( _lost_opp_pp_coalesce || _lost_opp_cflow_coalesce )
    tty->print_cr("Lost coalesce opportunity, %d private-private, and %d cflow interfered.",
                  _lost_opp_pp_coalesce, _lost_opp_cflow_coalesce );
  if( _used_cisc_instructions || _unused_cisc_instructions )
    tty->print_cr("Used cisc instruction %d, remained in register %d",
                  _used_cisc_instructions, _unused_cisc_instructions);
  if( _allocator_successes != 0 )
    tty->print_cr("Average allocation trips %f", (float)_allocator_attempts/(float)_allocator_successes);
  tty->print_cr("High Pressure Blocks = %d, Low Pressure Blocks = %d", _high_pressure, _low_pressure);
}
#endif // not PRODUCT
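
// A standalone sketch (disabled; the sample figures are made up) of the
// weighting behind the "Adjusted spill cost" line printed above: loads count
// 4x, stores 2x, copies 1x and mem-mem moves 12x.
#if 0
#include <stdio.h>

static double adjusted_spill_cost(double loads, double stores,
                                  double copies, double memoves) {
  // Same weights as print_chaitin_statistics() uses above.
  return loads*4.0 + stores*2.0 + copies*1.0 + memoves*12.0;
}

int main() {
  // 10 load units, 6 store units, 3 copy units, 1 mem-mem unit:
  // 10*4 + 6*2 + 3*1 + 1*12 == 67.
  printf("%.0f\n", adjusted_spill_cost(10.0, 6.0, 3.0, 1.0));
  return 0;
}
#endif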