/*
 * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

#include "incls/_precompiled.incl"
#include "incls/_gcm.cpp.incl"

// To avoid float value underflow
#define MIN_BLOCK_FREQUENCY 1.e-35f

//----------------------------schedule_node_into_block-------------------------
// Insert node n into block b. Look for projections of n and make sure they
// are in b also.
void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
  // Set basic block of n; add n to b.
  _bbs.map(n->_idx, b);
  b->add_inst(n);

  // After Matching, nearly any old Node may have projections trailing it.
  // These are usually machine-dependent flags.  In any case, they might
  // float to another block below this one.  Move them up.
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node*  use  = n->fast_out(i);
    if (use->is_Proj()) {
      Block* buse = _bbs[use->_idx];
      if (buse != b) {              // In wrong block?
        if (buse != NULL)
          buse->find_remove(use);   // Remove from wrong block
        _bbs.map(use->_idx, b);     // Re-insert in this block
        b->add_inst(use);
      }
    }
  }
}


//------------------------------schedule_pinned_nodes--------------------------
// Set the basic block for Nodes pinned into blocks
void PhaseCFG::schedule_pinned_nodes( VectorSet &visited ) {
  // Allocate node stack of size C->unique()+8 to avoid frequent realloc
  GrowableArray <Node *> spstack(C->unique()+8);
  spstack.push(_root);
  while ( spstack.is_nonempty() ) {
    Node *n = spstack.pop();
    if( !visited.test_set(n->_idx) ) { // Test node and flag it as visited
      if( n->pinned() && !_bbs.lookup(n->_idx) ) {  // Pinned?  Nail it down!
        Node *input = n->in(0);
        assert( input, "pinned Node must have Control" );
        while( !input->is_block_start() )
          input = input->in(0);
        Block *b = _bbs[input->_idx];  // Basic block of controlling input
        schedule_node_into_block(n, b);
      }
      for( int i = n->req() - 1; i >= 0; --i ) {  // For all inputs
        if( n->in(i) != NULL )
          spstack.push(n->in(i));
      }
    }
  }
}

#ifdef ASSERT
// Assert that new input b2 is dominated by all previous inputs.
// Check this by seeing that it is dominated by b1, the deepest
// input observed until b2.
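// (Illustrative example, not from the original comments: if the deepest
// input seen so far lives in block b1 at dom_depth 2 and a new input
// arrives in block b2 at dom_depth 5, walking b2->_idom upward must reach
// b1.  If it does not, the inputs are not linearly ordered in the dominator
// tree, no single block is dominated by all of them, and scheduling fails.)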
static void assert_dom(Block* b1, Block* b2, Node* n, Block_Array &bbs) {
  if (b1 == NULL)  return;
  assert(b1->_dom_depth < b2->_dom_depth, "sanity");
  Block* tmp = b2;
  while (tmp != b1 && tmp != NULL) {
    tmp = tmp->_idom;
  }
  if (tmp != b1) {
    // Detected an unschedulable graph.  Print some nice stuff and die.
    tty->print_cr("!!! Unschedulable graph !!!");
    for (uint j=0; j<n->len(); j++) { // For all inputs
      Node* inn = n->in(j); // Get input
      if (inn == NULL)  continue;  // Ignore NULL, missing inputs
      Block* inb = bbs[inn->_idx];
      tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
                 inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
      inn->dump();
    }
    tty->print("Failing node: ");
    n->dump();
    assert(false, "unschedulable graph");
  }
}
#endif

static Block* find_deepest_input(Node* n, Block_Array &bbs) {
  // Find the last input dominated by all other inputs.
  Block* deepb           = NULL;        // Deepest block so far
  int    deepb_dom_depth = 0;
  for (uint k = 0; k < n->len(); k++) { // For all inputs
    Node* inn = n->in(k);               // Get input
    if (inn == NULL)  continue;         // Ignore NULL, missing inputs
    Block* inb = bbs[inn->_idx];
    assert(inb != NULL, "must already have scheduled this input");
    if (deepb_dom_depth < (int) inb->_dom_depth) {
      // The new inb must be dominated by the previous deepb.
      // The various inputs must be linearly ordered in the dom
      // tree, or else there will not be a unique deepest block.
      DEBUG_ONLY(assert_dom(deepb, inb, n, bbs));
      deepb = inb;                      // Save deepest block
      deepb_dom_depth = deepb->_dom_depth;
    }
  }
  assert(deepb != NULL, "must be at least one input to n");
  return deepb;
}


//------------------------------schedule_early---------------------------------
// Find the earliest Block any instruction can be placed in.  Some instructions
// are pinned into Blocks.  Unpinned instructions can appear in the last block
// in which all their inputs occur.
bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) {
  // Allocate stack with enough space to avoid frequent realloc
  Node_Stack nstack(roots.Size() + 8); // (unique >> 1) + 24 from Java2D stats
  // roots.push(_root); _root will be processed among C->top() inputs
  roots.push(C->top());
  visited.set(C->top()->_idx);

  while (roots.size() != 0) {
    // Use local variables nstack_top_n & nstack_top_i to cache values
    // on stack's top.
    Node *nstack_top_n = roots.pop();
    uint  nstack_top_i = 0;
//while_nstack_nonempty:
    while (true) {
      // Get parent node and next input's index from stack's top.
      Node *n = nstack_top_n;
      uint  i = nstack_top_i;

      if (i == 0) {
        // Special control input processing.
        // While I am here, go ahead and look for Nodes which are taking control
        // from a is_block_proj Node.  After I inserted RegionNodes to make proper
        // blocks, the control at a is_block_proj more properly comes from the
        // Region being controlled by the block_proj Node.
        const Node *in0 = n->in(0);
        if (in0 != NULL) {              // Control-dependent?
          const Node *p = in0->is_block_proj();
          if (p != NULL && p != n) {    // Control from a block projection?
            // Find trailing Region
            Block *pb = _bbs[in0->_idx]; // Block-projection already has basic block
            uint j = 0;
            if (pb->_num_succs != 1) {  // More than 1 successor?
              // Search for successor
              uint max = pb->_nodes.size();
              assert( max > 1, "" );
              uint start = max - pb->_num_succs;
              // Find which output path belongs to projection
              for (j = start; j < max; j++) {
                if( pb->_nodes[j] == in0 )
                  break;
              }
              assert( j < max, "must find" );
              // Change control to match head of successor basic block
              j -= start;
            }
            n->set_req(0, pb->_succs[j]->head());
          }
        } else {               // n->in(0) == NULL
          if (n->req() == 1) { // This guy is a constant with NO inputs?
            n->set_req(0, _root);
          }
        }
      }

      // First, visit all inputs and force them to get a block.  If an
      // input is already in a block we quit following inputs (to avoid
      // cycles). Instead we put that Node on a worklist to be handled
      // later (since ITS inputs may not have a block yet).
      bool done = true;              // Assume all n's inputs will be processed
      while (i < n->len()) {         // For all inputs
        Node *in = n->in(i);         // Get input
        ++i;
        if (in == NULL) continue;    // Ignore NULL, missing inputs
        int is_visited = visited.test_set(in->_idx);
        if (!_bbs.lookup(in->_idx)) { // Missing block selection?
          if (is_visited) {
            // assert( !visited.test(in->_idx), "did not schedule early" );
            return false;
          }
          nstack.push(n, i);         // Save parent node and next input's index.
          nstack_top_n = in;         // Process current input now.
          nstack_top_i = 0;
          done = false;              // Not all n's inputs processed.
          break; // continue while_nstack_nonempty;
        } else if (!is_visited) {    // Input not yet visited?
          roots.push(in);            // Visit this guy later, using worklist
        }
      }
      if (done) {
        // All of n's inputs have been processed, complete post-processing.

        // Some instructions are pinned into a block.  These include Region,
        // Phi, Start, Return, and other control-dependent instructions and
        // any projections which depend on them.
        if (!n->pinned()) {
          // Set earliest legal block.
          _bbs.map(n->_idx, find_deepest_input(n, _bbs));
        }

        if (nstack.is_empty()) {
          // Finished all nodes on stack.
          // Process next node on the worklist 'roots'.
          break;
        }
        // Get saved parent node and next input's index.
        nstack_top_n = nstack.node();
        nstack_top_i = nstack.index();
        nstack.pop();
      } //    if (done)
    }   // while (true)
  }     // while (roots.size() != 0)
  return true;
}

//------------------------------dom_lca----------------------------------------
// Find least common ancestor in dominator tree
// LCA is a current notion of LCA, to be raised above 'this'.
// As a convenient boundary condition, return 'this' if LCA is NULL.
// Find the LCA of those two nodes.
Block* Block::dom_lca(Block* LCA) {
  if (LCA == NULL || LCA == this)  return this;

  Block* anc = this;
  while (anc->_dom_depth > LCA->_dom_depth)
    anc = anc->_idom;           // Walk up till anc is as high as LCA

  while (LCA->_dom_depth > anc->_dom_depth)
    LCA = LCA->_idom;           // Walk up till LCA is as high as anc

  while (LCA != anc) {          // Walk both up till they are the same
    LCA = LCA->_idom;
    anc = anc->_idom;
  }

  return LCA;
}
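// (Illustrative example, not from the original comments: in a dominator tree
//
//        R
//       / \
//      A   B
//      |   |
//      C   D
//
// C->dom_lca(D) first walks the deeper block up to the depth of the other,
// then climbs both in lock step until they meet -- here returning R.)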
//--------------------------raise_LCA_above_use--------------------------------
// We are placing a definition, and have been given a def->use edge.
// The definition must dominate the use, so move the LCA upward in the
// dominator tree to dominate the use.  If the use is a phi, adjust
// the LCA only with the phi input paths which actually use this def.
static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, Block_Array &bbs) {
  Block* buse = bbs[use->_idx];
  if (buse == NULL) return LCA;   // Unused killing Projs have no use block
  if (!use->is_Phi())  return buse->dom_lca(LCA);
  uint pmax = use->req();       // Number of Phi inputs
  // Why doesn't this loop just break after finding the matching input to
  // the Phi?  Well...it's like this.  I do not have true def-use/use-def
  // chains.  Means I cannot distinguish, from the def-use direction, which
  // of many use-defs lead from the same use to the same def.  That is, this
  // Phi might have several uses of the same def.  Each use appears in a
  // different predecessor block.  But when I enter here, I cannot distinguish
  // which use-def edge I should find the predecessor block for.  So I find
  // them all.  Means I do a little extra work if a Phi uses the same value
  // more than once.
  for (uint j=1; j<pmax; j++) { // For all inputs
    if (use->in(j) == def) {    // Found matching input?
      Block* pred = bbs[buse->pred(j)->_idx];
      LCA = pred->dom_lca(LCA);
    }
  }
  return LCA;
}

//----------------------------raise_LCA_above_marks----------------------------
// Return a new LCA that dominates LCA and any of its marked predecessors.
// Search all my parents up to 'early' (exclusive), looking for predecessors
// which are marked with the given index.  Return the LCA (in the dom tree)
// of all marked blocks.  If there are none marked, return the original
// LCA.
static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark,
                                    Block* early, Block_Array &bbs) {
  Block_List worklist;
  worklist.push(LCA);
  while (worklist.size() > 0) {
    Block* mid = worklist.pop();
    if (mid == early)  continue;  // stop searching here

    // Test and set the visited bit.
    if (mid->raise_LCA_visited() == mark)  continue;  // already visited

    // Don't process the current LCA, otherwise the search may terminate early
    if (mid != LCA && mid->raise_LCA_mark() == mark) {
      // Raise the LCA.
      LCA = mid->dom_lca(LCA);
      if (LCA == early)  break;   // stop searching everywhere
      assert(early->dominates(LCA), "early is high enough");
      // Resume searching at that point, skipping intermediate levels.
      worklist.push(LCA);
      if (LCA == mid)
        continue; // Don't mark as visited to avoid early termination.
    } else {
      // Keep searching through this block's predecessors.
      for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
        Block* mid_parent = bbs[ mid->pred(j)->_idx ];
        worklist.push(mid_parent);
      }
    }
    mid->set_raise_LCA_visited(mark);
  }
  return LCA;
}

//--------------------------memory_early_block--------------------------------
// This is a variation of find_deepest_input, the heart of schedule_early.
// Find the "early" block for a load, if we considered only memory and
// address inputs, that is, if other data inputs were ignored.
//
// Because a subset of edges is considered, the resulting block will
// be earlier (at a shallower dom_depth) than the true schedule_early
// point of the node. We compute this earlier block as a more permissive
// site for anti-dependency insertion, but only if subsume_loads is enabled.
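// (Illustrative note: considering fewer inputs can only lower the maximum
// dom_depth taken over the inputs, so the memory-only early block computed
// below is never deeper than the block schedule_early chose.)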
static Block* memory_early_block(Node* load, Block* early, Block_Array &bbs) {
  Node* base;
  Node* index;
  Node* store = load->in(MemNode::Memory);
  load->as_Mach()->memory_inputs(base, index);

  assert(base != NodeSentinel && index != NodeSentinel,
         "unexpected base/index inputs");

  Node* mem_inputs[4];
  int mem_inputs_length = 0;
  if (base != NULL)  mem_inputs[mem_inputs_length++] = base;
  if (index != NULL) mem_inputs[mem_inputs_length++] = index;
  if (store != NULL) mem_inputs[mem_inputs_length++] = store;

  // In the comparison below, add one to account for the control input,
  // which may be null, but always takes up a spot in the in array.
  if (mem_inputs_length + 1 < (int) load->req()) {
    // This "load" has more inputs than just the memory, base and index inputs.
    // For purposes of checking anti-dependences, we need to start
    // from the early block of only the address portion of the instruction,
    // and ignore other blocks that may have factored into the wider
    // schedule_early calculation.
    if (load->in(0) != NULL) mem_inputs[mem_inputs_length++] = load->in(0);

    Block* deepb           = NULL;        // Deepest block so far
    int    deepb_dom_depth = 0;
    for (int i = 0; i < mem_inputs_length; i++) {
      Block* inb = bbs[mem_inputs[i]->_idx];
      if (deepb_dom_depth < (int) inb->_dom_depth) {
        // The new inb must be dominated by the previous deepb.
        // The various inputs must be linearly ordered in the dom
        // tree, or else there will not be a unique deepest block.
        DEBUG_ONLY(assert_dom(deepb, inb, load, bbs));
        deepb = inb;                      // Save deepest block
        deepb_dom_depth = deepb->_dom_depth;
      }
    }
    early = deepb;
  }

  return early;
}

//--------------------------insert_anti_dependences---------------------------
// A load may need to witness memory that nearby stores can overwrite.
// For each nearby store, either insert an "anti-dependence" edge
// from the load to the store, or else move LCA upward to force the
// load to (eventually) be scheduled in a block above the store.
//
// Do not add edges to stores on distinct control-flow paths;
// only add edges to stores which might interfere.
//
// Return the (updated) LCA.  There will not be any possibly interfering
// store between the load's "early block" and the updated LCA.
// Any stores in the updated LCA will have new precedence edges
// back to the load.  The caller is expected to schedule the load
// in the LCA, in which case the precedence edges will make LCM
// preserve anti-dependences.  The caller may also hoist the load
// above the LCA, if it is not the early block.
Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
  assert(load->needs_anti_dependence_check(), "must be a load of some sort");
  assert(LCA != NULL, "");
  DEBUG_ONLY(Block* LCA_orig = LCA);

  // Compute the alias index.  Loads and stores with different alias indices
  // do not need anti-dependence edges.
  uint load_alias_idx = C->get_alias_index(load->adr_type());
#ifdef ASSERT
  if (load_alias_idx == Compile::AliasIdxBot && C->AliasLevel() > 0 &&
      (PrintOpto || VerifyAliases ||
       PrintMiscellaneous && (WizardMode || Verbose))) {
    // Load nodes should not consume all of memory.
    // Reporting a bottom type indicates a bug in adlc.
    // If some particular type of node validly consumes all of memory,
    // sharpen the preceding "if" to exclude it, so we can catch bugs here.
    tty->print_cr("*** Possible Anti-Dependence Bug:  Load consumes all of memory.");
    load->dump(2);
    if (VerifyAliases)  assert(load_alias_idx != Compile::AliasIdxBot, "");
  }
#endif
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrComp),
         "String compare is only known 'load' that does not conflict with any stores");

  if (!C->alias_type(load_alias_idx)->is_rewritable()) {
    // It is impossible to spoil this load by putting stores before it,
    // because we know that the stores will never update the value
    // which 'load' must witness.
    return LCA;
  }

  node_idx_t load_index = load->_idx;

  // Note the earliest legal placement of 'load', as determined by
  // the unique point in the dom tree where all memory effects
  // and other inputs are first available.  (Computed by schedule_early.)
  // For normal loads, 'early' is the shallowest place (dom graph wise)
  // to look for anti-deps between this load and any store.
  Block* early = _bbs[load_index];

  // If we are subsuming loads, compute an "early" block that only considers
  // memory or address inputs. This block may be different from the
  // schedule_early block in that it could be at an even shallower depth in the
  // dominator tree, and allow for a broader discovery of anti-dependences.
  if (C->subsume_loads()) {
    early = memory_early_block(load, early, _bbs);
  }

  ResourceArea *area = Thread::current()->resource_area();
  Node_List worklist_mem(area);     // prior memory state to store
  Node_List worklist_store(area);   // possible-def to explore
  Node_List worklist_visited(area); // visited mergemem nodes
  Node_List non_early_stores(area); // all relevant stores outside of early
  bool must_raise_LCA = false;

#ifdef TRACK_PHI_INPUTS
  // %%% This extra checking fails because MergeMem nodes are not GVNed.
  // Provide "phi_inputs" to check if every input to a PhiNode is from the
  // original memory state.  This indicates a PhiNode that should not
  // prevent the load from sinking.  For such a block, set_raise_LCA_mark
  // may be overly conservative.
  // Mechanism: count inputs seen for each Phi encountered in worklist_store.
  DEBUG_ONLY(GrowableArray<uint> phi_inputs(area, C->unique(),0,0));
#endif

  // 'load' uses some memory state; look for users of the same state.
  // Recurse through MergeMem nodes to the stores that use them.

  // Each of these stores is a possible definition of memory
  // that 'load' needs to use.  We need to force 'load'
  // to occur before each such store.  When the store is in
  // the same block as 'load', we insert an anti-dependence
  // edge load->store.

  // The relevant stores "nearby" the load consist of a tree rooted
  // at initial_mem, with internal nodes of type MergeMem.
  // Therefore, the branches visited by the worklist are of this form:
  //    initial_mem -> (MergeMem ->)* store
  // The anti-dependence constraints apply only to the fringe of this tree.

  Node* initial_mem = load->in(MemNode::Memory);
  worklist_store.push(initial_mem);
  worklist_visited.push(initial_mem);
  worklist_mem.push(NULL);
  while (worklist_store.size() > 0) {
    // Examine a nearby store to see if it might interfere with our load.
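    // (Illustrative invariant: worklist_mem and worklist_store are popped in
    // pairs, and each 'mem' is the memory state through which the matching
    // 'store' was reached, i.e. one of store's memory inputs.)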
    Node* mem   = worklist_mem.pop();
    Node* store = worklist_store.pop();
    uint op = store->Opcode();

    // MergeMems do not directly have anti-deps.
    // Treat them as internal nodes in a forward tree of memory states,
    // the leaves of which are each a 'possible-def'.
    if (store == initial_mem    // root (exclusive) of tree we are searching
        || op == Op_MergeMem    // internal node of tree we are searching
        ) {
      mem = store;   // It's not a possibly interfering store.
      if (store == initial_mem)
        initial_mem = NULL;  // only process initial memory once

      for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
        store = mem->fast_out(i);
        if (store->is_MergeMem()) {
          // Be sure we don't get into combinatorial problems.
          // (Allow phis to be repeated; they can merge two relevant states.)
          uint j = worklist_visited.size();
          for (; j > 0; j--) {
            if (worklist_visited.at(j-1) == store)  break;
          }
          if (j > 0)  continue; // already on work list; do not repeat
          worklist_visited.push(store);
        }
        worklist_mem.push(mem);
        worklist_store.push(store);
      }
      continue;
    }

    if (op == Op_MachProj || op == Op_Catch)   continue;
    if (store->needs_anti_dependence_check())  continue;  // not really a store

    // Compute the alias index.  Loads and stores with different alias
    // indices do not need anti-dependence edges.  Wide MemBars are
    // anti-dependent on everything (except immutable memories).
    const TypePtr* adr_type = store->adr_type();
    if (!C->can_alias(adr_type, load_alias_idx))  continue;

    // Most slow-path runtime calls do NOT modify Java memory, but
    // they can block and so write Raw memory.
    if (store->is_Mach()) {
      MachNode* mstore = store->as_Mach();
      if (load_alias_idx != Compile::AliasIdxRaw) {
        // Check for call into the runtime using the Java calling
        // convention (and from there into a wrapper); it has no
        // _method.  Can't do this optimization for Native calls because
        // they CAN write to Java memory.
        if (mstore->ideal_Opcode() == Op_CallStaticJava) {
          assert(mstore->is_MachSafePoint(), "");
          MachSafePointNode* ms = (MachSafePointNode*) mstore;
          assert(ms->is_MachCallJava(), "");
          MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
          if (mcj->_method == NULL) {
            // These runtime calls do not write to Java visible memory
            // (other than Raw) and so do not require anti-dependence edges.
            continue;
          }
        }
        // Same for SafePoints: they read/write Raw but only read otherwise.
        // This is basically a workaround for SafePoints only defining control
        // instead of control + memory.
        if (mstore->ideal_Opcode() == Op_SafePoint)
          continue;
      } else {
        // Some raw memory, such as the load of "top" at an allocation,
        // can be control dependent on the previous safepoint. See
        // comments in GraphKit::allocate_heap() about control input.
        // Inserting an anti-dep between such a safepoint and a use
        // creates a cycle, and will cause a subsequent failure in
        // local scheduling.  (BugId 4919904)
        // (%%% How can a control input be a safepoint and not a projection??)
        if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore)
          continue;
      }
    }

    // Identify a block that the current load must be above,
    // or else observe that 'store' is all the way up in the
    // earliest legal block for 'load'.  In the latter case,
    // immediately insert an anti-dependence edge.
    Block* store_block = _bbs[store->_idx];
    assert(store_block != NULL, "unused killing projections skipped above");

    if (store->is_Phi()) {
      // 'load' uses memory which is one (or more) of the Phi's inputs.
      // It must be scheduled not before the Phi, but rather before
      // each of the relevant Phi inputs.
      //
      // Instead of finding the LCA of all inputs to a Phi that match 'mem',
      // we mark each corresponding predecessor block and do a combined
      // hoisting operation later (raise_LCA_above_marks).
      //
      // Do not assert(store_block != early, "Phi merging memory after access")
      // PhiNode may be at start of block 'early' with backedge to 'early'
      DEBUG_ONLY(bool found_match = false);
      for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
        if (store->in(j) == mem) {   // Found matching input?
          DEBUG_ONLY(found_match = true);
          Block* pred_block = _bbs[store_block->pred(j)->_idx];
          if (pred_block != early) {
            // If any predecessor of the Phi matches the load's "early block",
            // we do not need a precedence edge between the Phi and 'load'
            // since the load will be forced into a block preceding the Phi.
            pred_block->set_raise_LCA_mark(load_index);
            assert(!LCA_orig->dominates(pred_block) ||
                   early->dominates(pred_block), "early is high enough");
            must_raise_LCA = true;
          }
        }
      }
      assert(found_match, "no worklist bug");
#ifdef TRACK_PHI_INPUTS
#ifdef ASSERT
      // This assert asks about correct handling of PhiNodes, which may not
      // have all input edges directly from 'mem'. See BugId 4621264
      int num_mem_inputs = phi_inputs.at_grow(store->_idx,0) + 1;
      // Increment by exactly one even if there are multiple copies of 'mem'
      // coming into the phi, because we will run this block several times
      // if there are several copies of 'mem'.  (That's how DU iterators work.)
      phi_inputs.at_put(store->_idx, num_mem_inputs);
      assert(PhiNode::Input + num_mem_inputs < store->req(),
             "Expect at least one phi input will not be from original memory state");
#endif //ASSERT
#endif //TRACK_PHI_INPUTS
    } else if (store_block != early) {
      // 'store' is between the current LCA and earliest possible block.
      // Label its block, and decide later on how to raise the LCA
      // to include the effect on LCA of this store.
      // If this store's block gets chosen as the raised LCA, we
      // will find him on the non_early_stores list and stick him
      // with a precedence edge.
      // (But, don't bother if LCA is already raised all the way.)
      if (LCA != early) {
        store_block->set_raise_LCA_mark(load_index);
        must_raise_LCA = true;
        non_early_stores.push(store);
      }
    } else {
      // Found a possibly-interfering store in the load's 'early' block.
      // This means 'load' cannot sink at all in the dominator tree.
      // Add an anti-dep edge, and squeeze 'load' into the highest block.
      assert(store != load->in(0), "dependence cycle found");
      if (verify) {
        assert(store->find_edge(load) != -1, "missing precedence edge");
      } else {
        store->add_prec(load);
      }
      LCA = early;
      // This turns off the process of gathering non_early_stores.
    }
  }
  // (Worklist is now empty; all nearby stores have been visited.)
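  // (Recap, illustrative: each interfering store was handled one of three
  // ways above -- a memory Phi marks its matching predecessor blocks, a
  // store outside 'early' marks its block for the combined raise below, and
  // a store inside 'early' gets an immediate precedence edge and pins the
  // LCA to 'early'.)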
  // Finished if 'load' must be scheduled in its 'early' block.
  // If we found any stores there, they have already been given
  // precedence edges.
  if (LCA == early)  return LCA;

  // We get here only if there are no possibly-interfering stores
  // in the load's 'early' block.  Move LCA up above all predecessors
  // which contain stores we have noted.
  //
  // The raised LCA block can be a home to such interfering stores,
  // but its predecessors must not contain any such stores.
  //
  // The raised LCA will be a lower bound for placing the load,
  // preventing the load from sinking past any block containing
  // a store that may invalidate the memory state required by 'load'.
  if (must_raise_LCA)
    LCA = raise_LCA_above_marks(LCA, load->_idx, early, _bbs);
  if (LCA == early)  return LCA;

  // Insert anti-dependence edges from 'load' to each store
  // in the non-early LCA block.
  // Mine the non_early_stores list for such stores.
  if (LCA->raise_LCA_mark() == load_index) {
    while (non_early_stores.size() > 0) {
      Node* store = non_early_stores.pop();
      Block* store_block = _bbs[store->_idx];
      if (store_block == LCA) {
        // add anti_dependence from store to load in its own block
        assert(store != load->in(0), "dependence cycle found");
        if (verify) {
          assert(store->find_edge(load) != -1, "missing precedence edge");
        } else {
          store->add_prec(load);
        }
      } else {
        assert(store_block->raise_LCA_mark() == load_index, "block was marked");
        // Any other stores we found must be either inside the new LCA
        // or else outside the original LCA.  In the latter case, they
        // did not interfere with any use of 'load'.
        assert(LCA->dominates(store_block)
               || !LCA_orig->dominates(store_block), "no stray stores");
      }
    }
  }

  // Return the highest block containing stores; any stores
  // within that block have been given anti-dependence edges.
  return LCA;
}

// This class is used to iterate backwards over the nodes in the graph.

class Node_Backward_Iterator {

private:
  Node_Backward_Iterator();

public:
  // Constructor for the iterator
  Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, Block_Array &bbs);

  // Return the next node in the backward traversal, or NULL when done
  Node *next();

private:
  VectorSet   &_visited;
  Node_List   &_stack;
  Block_Array &_bbs;
};

// Constructor for the Node_Backward_Iterator
Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, Block_Array &bbs )
  : _visited(visited), _stack(stack), _bbs(bbs) {
  // The stack should contain exactly the root
  stack.clear();
  stack.push(root);

  // Clear the visited bits
  visited.Clear();
}

// Iterator for the Node_Backward_Iterator
Node *Node_Backward_Iterator::next() {

  // If the _stack is empty, then just return NULL: finished.
  if ( !_stack.size() )
    return NULL;

  // '_stack' is emulating a real _stack.  The 'visit-all-users' loop has been
  // made stateless, so I do not need to record the index 'i' on my _stack.
  // Instead I visit all users each time, scanning for unvisited users.
  // I visit unvisited not-anti-dependence users first, then anti-dependent
  // children next.
  Node *self = _stack.pop();

  // I cycle here when I am entering a deeper level of recursion.
  // The key variable 'self' was set prior to jumping here.
  while( 1 ) {

    _visited.set(self->_idx);

    // Now schedule all uses as late as possible.
    uint src     = self->is_Proj() ? self->in(0)->_idx : self->_idx;
    uint src_rpo = _bbs[src]->_rpo;

    // Schedule all nodes in a post-order visit
    Node *unvisited = NULL;  // Unvisited anti-dependent Node, if any

    // Scan for unvisited nodes
    for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
      // For all uses, schedule late
      Node* n = self->fast_out(i); // Use

      // Skip already visited children
      if ( _visited.test(n->_idx) )
        continue;

      // do not traverse backward control edges
      Node *use = n->is_Proj() ? n->in(0) : n;
      uint use_rpo = _bbs[use->_idx]->_rpo;

      if ( use_rpo < src_rpo )
        continue;

      // Phi nodes always precede uses in a basic block
      if ( use_rpo == src_rpo && use->is_Phi() )
        continue;

      unvisited = n;  // Found unvisited

      // Check for possible-anti-dependent
      if( !n->needs_anti_dependence_check() )
        break;  // Not visited, not anti-dep; schedule it NOW
    }

    // Did I find an unvisited not-anti-dependent Node?
    if ( !unvisited )
      break;  // All done with children; post-visit 'self'

    // Visit the unvisited Node.  Contains the obvious push to
    // indicate I'm entering a deeper level of recursion.  I push the
    // old state onto the _stack and set a new state and loop (recurse).
    _stack.push(self);
    self = unvisited;
  } // End recursion loop

  return self;
}

//------------------------------ComputeLatenciesBackwards----------------------
// Compute the latency of all the instructions.
void PhaseCFG::ComputeLatenciesBackwards(VectorSet &visited, Node_List &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- ComputeLatenciesBackwards ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs);
  Node *n;

  // Walk over all the nodes from last to first
  while (n = iter.next()) {
    // Set the latency for the definitions of this instruction
    partial_latency_of_defs(n);
  }
} // end ComputeLatenciesBackwards
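// (Illustrative note: latency here grows toward the start of the method.
// Each def's latency is raised to max(def_latency, use_latency + edge
// latency) over all of its uses, so a def feeding a use of latency 5 across
// an edge of latency 2 ends up with latency at least 7.)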
//------------------------------partial_latency_of_defs------------------------
// Compute the latency impact of this node on all defs.  This computes
// a number that increases as we approach the beginning of the routine.
void PhaseCFG::partial_latency_of_defs(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_to_inputs: node_latency[%d] = %d for node",
               n->_idx, _node_latency.at_grow(n->_idx));
    dump();
  }
#endif

  if (n->is_Proj())
    n = n->in(0);

  if (n->is_Root())
    return;

  uint nlen = n->len();
  uint use_latency = _node_latency.at_grow(n->_idx);
  uint use_pre_order = _bbs[n->_idx]->_pre_order;

  for ( uint j=0; j<nlen; j++ ) {
    Node *def = n->in(j);

    if (!def || def == n)
      continue;

    // Walk backwards thru projections
    if (def->is_Proj())
      def = def->in(0);

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("#    in(%2d): ", j);
      def->dump();
    }
#endif

    // If the defining block is not known, assume it is ok
    Block *def_block = _bbs[def->_idx];
    uint def_pre_order = def_block ? def_block->_pre_order : 0;

    if ( (use_pre_order <  def_pre_order) ||
         (use_pre_order == def_pre_order && n->is_Phi()) )
      continue;

    uint delta_latency = n->latency(j);
    uint current_latency = delta_latency + use_latency;

    if (_node_latency.at_grow(def->_idx) < current_latency) {
      _node_latency.at_put_grow(def->_idx, current_latency);
    }

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d",
                    use_latency, j, delta_latency, current_latency, def->_idx,
                    _node_latency.at_grow(def->_idx));
    }
#endif
  }
}

//------------------------------latency_from_use-------------------------------
// Compute the latency of a specific use
int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
  // If self-reference, return no latency
  if (use == n || use->is_Root())
    return 0;

  uint def_pre_order = _bbs[def->_idx]->_pre_order;
  uint latency = 0;

  // If the use is not a projection, then it is simple...
  if (!use->is_Proj()) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("#    out(): ");
      use->dump();
    }
#endif

    uint use_pre_order = _bbs[use->_idx]->_pre_order;

    if (use_pre_order < def_pre_order)
      return 0;

    if (use_pre_order == def_pre_order && use->is_Phi())
      return 0;

    uint nlen = use->len();
    uint nl = _node_latency.at_grow(use->_idx);

    for ( uint j=0; j<nlen; j++ ) {
      if (use->in(j) == n) {
        // Change this if we want local latencies
        uint ul = use->latency(j);
        uint  l = ul + nl;
        if (latency < l) latency = l;
#ifndef PRODUCT
        if (trace_opto_pipelining()) {
          tty->print_cr("# %d + edge_latency(%d) == %d -> %d, latency = %d",
                        nl, j, ul, l, latency);
        }
#endif
      }
    }
  } else {
    // This is a projection, just grab the latency of the use(s)
    for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
      uint l = latency_from_use(use, def, use->fast_out(j));
      if (latency < l) latency = l;
    }
  }

  return latency;
}

//------------------------------latency_from_uses------------------------------
// Compute the latency of this instruction relative to all of its uses.
// This computes a number that increases as we approach the beginning of the
// routine.
void PhaseCFG::latency_from_uses(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_from_outputs: node_latency[%d] = %d for node",
               n->_idx, _node_latency.at_grow(n->_idx));
    dump();
  }
#endif
  uint latency=0;
  const Node *def = n->is_Proj() ? n->in(0): n;

  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    uint l = latency_from_use(n, def, n->fast_out(i));

    if (latency < l) latency = l;
  }

  _node_latency.at_put_grow(n->_idx, latency);
}

//------------------------------hoist_to_cheaper_block-------------------------
// Pick a block for node self, between early and LCA, that is a cheaper
// alternative to LCA.
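// (Illustrative note on the tradeoff below: a candidate block is accepted if
// it is strictly less frequent than the best so far, or -- when latency is
// not yet covered -- if it is at most a factor 'delta' more frequent while
// still hiding the target latency.)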
Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
  const double delta = 1+PROB_UNLIKELY_MAG(4);
  Block* least       = LCA;
  double least_freq  = least->_freq;
  uint target        = _node_latency.at_grow(self->_idx);
  uint start_latency = _node_latency.at_grow(LCA->_nodes[0]->_idx);
  uint end_latency   = _node_latency.at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
  bool in_latency    = (target <= start_latency);
  const Block* root_block = _bbs[_root->_idx];

  // Turn off latency scheduling if scheduling is just plain off
  if (!C->do_scheduling())
    in_latency = true;

  // Do not hoist (to cover latency) instructions which target a
  // single register.  Hoisting stretches the live range of the
  // single register and may force spilling.
  MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
  if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty())
    in_latency = true;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# Find cheaper block for latency %d: ",
               _node_latency.at_grow(self->_idx));
    self->dump();
    tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
                  LCA->_pre_order,
                  LCA->_nodes[0]->_idx,
                  start_latency,
                  LCA->_nodes[LCA->end_idx()]->_idx,
                  end_latency,
                  least_freq);
  }
#endif

  // Walk up the dominator tree from LCA (Lowest common ancestor) to
  // the earliest legal location.  Capture the least execution frequency.
  while (LCA != early) {
    LCA = LCA->_idom;         // Follow up the dominator tree

    if (LCA == NULL) {
      // Bailout without retry
      C->record_method_not_compilable("late schedule failed: LCA == NULL");
      return least;
    }

    // Don't hoist machine instructions to the root basic block
    if (mach && LCA == root_block)
      break;

    uint start_lat = _node_latency.at_grow(LCA->_nodes[0]->_idx);
    uint end_idx   = LCA->end_idx();
    uint end_lat   = _node_latency.at_grow(LCA->_nodes[end_idx]->_idx);
    double LCA_freq = LCA->_freq;
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
                    LCA->_pre_order, LCA->_nodes[0]->_idx, start_lat, end_idx, end_lat, LCA_freq);
    }
#endif
    if (LCA_freq < least_freq              || // Better Frequency
        ( !in_latency                   &&    // No block containing latency
          LCA_freq < least_freq * delta &&    // No worse frequency
          target >= end_lat             &&    // within latency range
          !self->is_iteratively_computed() )  // But don't hoist IV increments
             // because they may end up above other uses of their phi forcing
             // their result register to be different from their input.
       ) {
      least = LCA;            // Found cheaper block
      least_freq = LCA_freq;
      start_latency = start_lat;
      end_latency = end_lat;
      if (target <= start_lat)
        in_latency = true;
    }
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print_cr("# Choose block B%d with start latency=%d and freq=%g",
                  least->_pre_order, start_latency, least_freq);
  }
#endif

  // See if the latency needs to be updated
  if (target < end_latency) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
    }
#endif
    _node_latency.at_put_grow(self->_idx, end_latency);
    partial_latency_of_defs(self);
  }

  return least;
}


//------------------------------schedule_late-----------------------------------
// Now schedule all codes as LATE as possible.  This is the LCA in the
// dominator tree of all USES of a value.  Pick the block with the least
// loop nesting depth that is lowest in the dominator tree.
extern const char must_clone[];
void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- schedule_late ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs);
  Node *self;

  // Walk over all the nodes from last to first
  while (self = iter.next()) {
    Block* early = _bbs[self->_idx];   // Earliest legal placement

    if (self->is_top()) {
      // Top node goes in bb #2 with other constants.
      // It must be special-cased, because it has no out edges.
      early->add_inst(self);
      continue;
    }

    // No uses, just terminate
    if (self->outcnt() == 0) {
      assert(self->Opcode() == Op_MachProj, "sanity");
      continue;                   // Must be a dead machine projection
    }

    // If node is pinned in the block, then no scheduling can be done.
    if( self->pinned() )          // Pinned in block?
      continue;

    MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
    if (mach) {
      switch (mach->ideal_Opcode()) {
      case Op_CreateEx:
        // Don't move exception creation
        early->add_inst(self);
        continue;
        break;
      case Op_CheckCastPP:
        // Don't move CheckCastPP nodes away from their input, if the input
        // is a rawptr (5071820).
        Node *def = self->in(1);
        if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
          early->add_inst(self);
          continue;
        }
        break;
      }
    }

    // Gather LCA of all uses
    Block *LCA = NULL;
    {
      for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
        // For all uses, find LCA
        Node* use = self->fast_out(i);
        LCA = raise_LCA_above_use(LCA, use, self, _bbs);
      }
    }  // (Hide defs of imax, i from rest of block.)

    // Place temps in the block of their use.  This isn't a
    // requirement for correctness but it reduces useless
    // interference between temps and other nodes.
    if (mach != NULL && mach->is_MachTemp()) {
      _bbs.map(self->_idx, LCA);
      LCA->add_inst(self);
      continue;
    }

    // Check if 'self' could be anti-dependent on memory
    if (self->needs_anti_dependence_check()) {
      // Hoist LCA above possible-defs and insert anti-dependences to
      // defs in new LCA block.
      LCA = insert_anti_dependences(LCA, self);
    }

    if (early->_dom_depth > LCA->_dom_depth) {
      // Somehow the LCA has moved above the earliest legal point.
      // (One way this can happen is via memory_early_block.)
      if (C->subsume_loads() == true && !C->failing()) {
        // Retry with subsume_loads == false
        // If this is the first failure, the sentinel string will "stick"
        // to the Compile object, and the C2Compiler will see it and retry.
        C->record_failure(C2Compiler::retry_no_subsuming_loads());
      } else {
        // Bailout without retry when (early->_dom_depth > LCA->_dom_depth)
        C->record_method_not_compilable("late schedule failed: incorrect graph");
      }
      return;
    }

    // If there is no opportunity to hoist, then we're done.
    bool try_to_hoist = (LCA != early);

    // Must clone guys stay next to use; no hoisting allowed.
    // Also cannot hoist guys that alter memory or are otherwise not
    // allocatable (hoisting can make a value live longer, leading to
    // anti and output dependency problems which are normally resolved
    // by the register allocator giving everyone a different register).
    if (mach != NULL && must_clone[mach->ideal_Opcode()])
      try_to_hoist = false;

    Block* late = NULL;
    if (try_to_hoist) {
      // Now find the block with the least execution frequency.
      // Start at the latest schedule and work up to the earliest schedule
      // in the dominator tree.  Thus the Node will dominate all its uses.
      late = hoist_to_cheaper_block(LCA, early, self);
    } else {
      // Just use the LCA of the uses.
      late = LCA;
    }

    // Put the node into target block
    schedule_node_into_block(self, late);

#ifdef ASSERT
    if (self->needs_anti_dependence_check()) {
      // since precedence edges are only inserted when we're sure they
      // are needed make sure that after placement in a block we don't
      // need any new precedence edges.
      verify_anti_dependences(late, self);
    }
#endif
  } // Loop until all nodes have been visited

} // end schedule_late

//------------------------------GlobalCodeMotion-------------------------------
void PhaseCFG::GlobalCodeMotion( Matcher &matcher, uint unique, Node_List &proj_list ) {
  ResourceMark rm;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Start GlobalCodeMotion ----\n");
  }
#endif

  // Initialize the bbs.map for things on the proj_list
  uint i;
  for( i=0; i < proj_list.size(); i++ )
    _bbs.map(proj_list[i]->_idx, NULL);

  // Set the basic block for Nodes pinned into blocks
  Arena *a = Thread::current()->resource_area();
  VectorSet visited(a);
  schedule_pinned_nodes( visited );

  // Find the earliest Block any instruction can be placed in.  Some
  // instructions are pinned into Blocks.  Unpinned instructions can
  // appear in the last block in which all their inputs occur.
  visited.Clear();
  Node_List stack(a);
  stack.map( (unique >> 1) + 16, NULL); // Pre-grow the list
  if (!schedule_early(visited, stack)) {
    // Bailout without retry
    C->record_method_not_compilable("early schedule failed");
    return;
  }

  // Build Def-Use edges.
  proj_list.push(_root);        // Add real root as another root
  proj_list.pop();

  // Compute the latency information (via backwards walk) for all the
  // instructions in the graph
  GrowableArray<uint> node_latency;
  _node_latency = node_latency;

  if( C->do_scheduling() )
    ComputeLatenciesBackwards(visited, stack);

  // Now schedule all codes as LATE as possible.  This is the LCA in the
  // dominator tree of all USES of a value.  Pick the block with the least
  // loop nesting depth that is lowest in the dominator tree.
  // ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() )
  schedule_late(visited, stack);
  if( C->failing() ) {
    // schedule_late fails only when graph is incorrect.
    assert(!VerifyGraphEdges, "verification should have failed");
    return;
  }

  unique = C->unique();

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Detect implicit null checks ----\n");
  }
#endif

  // Detect implicit-null-check opportunities.  Basically, find NULL checks
  // with suitable memory ops nearby.  Use the memory op to do the NULL check.
  // I can generate a memory op if there is not one nearby.
  if (C->is_method_compilation()) {
    // Don't do it for natives, adapters, or runtime stubs
    int allowed_reasons = 0;
    // ...and don't do it when there have been too many traps, globally.
    for (int reason = (int)Deoptimization::Reason_none+1;
         reason < Compile::trapHistLength; reason++) {
      assert(reason < BitsPerInt, "recode bit map");
      if (!C->too_many_traps((Deoptimization::DeoptReason) reason))
        allowed_reasons |= nth_bit(reason);
    }
    // By reversing the loop direction we get a very minor gain on mpegaudio.
    // Feel free to revert to a forward loop for clarity.
    // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
    for( int i= matcher._null_check_tests.size()-2; i>=0; i-=2 ) {
      Node *proj = matcher._null_check_tests[i  ];
      Node *val  = matcher._null_check_tests[i+1];
      _bbs[proj->_idx]->implicit_null_check(this, proj, val, allowed_reasons);
      // The implicit_null_check will only perform the transformation
      // if the null branch is truly uncommon, *and* it leads to an
      // uncommon trap.  Combined with the too_many_traps guards
      // above, this prevents SEGV storms reported in 6366351,
      // by recompiling offending methods without this optimization.
    }
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Start Local Scheduling ----\n");
  }
#endif

  // Schedule locally.  Right now a simple topological sort.
  // Later, do a real latency aware scheduler.
  int *ready_cnt = NEW_RESOURCE_ARRAY(int,C->unique());
  memset( ready_cnt, -1, C->unique() * sizeof(int) );
  visited.Clear();
  for (i = 0; i < _num_blocks; i++) {
    if (!_blocks[i]->schedule_local(this, matcher, ready_cnt, visited)) {
      if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
        C->record_method_not_compilable("local schedule failed");
      }
      return;
    }
  }

  // If we inserted any instructions between a Call and his CatchNode,
  // clone the instructions on all paths below the Catch.
  for( i=0; i < _num_blocks; i++ )
    _blocks[i]->call_catch_cleanup(_bbs);

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- After GlobalCodeMotion ----\n");
    for (uint i = 0; i < _num_blocks; i++) {
      _blocks[i]->dump();
    }
  }
#endif
}


//------------------------------Estimate_Block_Frequency-----------------------
// Estimate block frequencies based on IfNode probabilities.
void PhaseCFG::Estimate_Block_Frequency() {

  // Force conditional branches leading to uncommon traps to be unlikely,
  // not because we get to the uncommon_trap with less relative frequency,
  // but because an uncommon_trap typically causes a deopt, so we only get
  // there once.
  if (C->do_freq_based_layout()) {
    Block_List worklist;
    Block* root_blk = _blocks[0];
    for (uint i = 1; i < root_blk->num_preds(); i++) {
      Block *pb = _bbs[root_blk->pred(i)->_idx];
      if (pb->has_uncommon_code()) {
        worklist.push(pb);
      }
    }
    while (worklist.size() > 0) {
      Block* uct = worklist.pop();
      if (uct == _broot) continue;
      for (uint i = 1; i < uct->num_preds(); i++) {
        Block *pb = _bbs[uct->pred(i)->_idx];
        if (pb->_num_succs == 1) {
          worklist.push(pb);
        } else if (pb->num_fall_throughs() == 2) {
          pb->update_uncommon_branch(uct);
        }
      }
    }
  }

  // Create the loop tree and calculate loop depth.
  _root_loop = create_loop_tree();
  _root_loop->compute_loop_depth(0);

  // Compute block frequency of each block, relative to a single loop entry.
  _root_loop->compute_freq();

  // Adjust all frequencies to be relative to a single method entry
  _root_loop->_freq = 1.0;
  _root_loop->scale_freq();

  // force paths ending at uncommon traps to be infrequent
  if (!C->do_freq_based_layout()) {
    Block_List worklist;
    Block* root_blk = _blocks[0];
    for (uint i = 1; i < root_blk->num_preds(); i++) {
      Block *pb = _bbs[root_blk->pred(i)->_idx];
      if (pb->has_uncommon_code()) {
        worklist.push(pb);
      }
    }
    while (worklist.size() > 0) {
      Block* uct = worklist.pop();
      uct->_freq = PROB_MIN;
      for (uint i = 1; i < uct->num_preds(); i++) {
        Block *pb = _bbs[uct->pred(i)->_idx];
        if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
          worklist.push(pb);
        }
      }
    }
  }

#ifdef ASSERT
  for (uint i = 0; i < _num_blocks; i++ ) {
    Block *b = _blocks[i];
    assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency");
  }
#endif

#ifndef PRODUCT
  if (PrintCFGBlockFreq) {
    tty->print_cr("CFG Block Frequencies");
    _root_loop->dump_tree();
    if (Verbose) {
      tty->print_cr("PhaseCFG dump");
      dump();
      tty->print_cr("Node dump");
      _root->dump(99999);
    }
  }
#endif
}

//----------------------------create_loop_tree--------------------------------
// Create a loop tree from the CFG
CFGLoop* PhaseCFG::create_loop_tree() {

#ifdef ASSERT
  assert( _blocks[0] == _broot, "" );
  for (uint i = 0; i < _num_blocks; i++ ) {
    Block *b = _blocks[i];
    // Check that _loop fields are clear...we could clear them if not.
    assert(b->_loop == NULL, "clear _loop expected");
    // Sanity check that the RPO numbering is reflected in the _blocks array.
    // It doesn't have to be for the loop tree to be built, but if it is not,
    // then the blocks have been reordered since dom graph building...which
    // may question the RPO numbering.
    assert(b->_rpo == i, "unexpected reverse post order number");
  }
#endif

  int idct = 0;
  CFGLoop* root_loop = new CFGLoop(idct++);

  Block_List worklist;

  // Assign blocks to loops
  for(uint i = _num_blocks - 1; i > 0; i-- ) { // skip Root block
    Block *b = _blocks[i];

    if (b->head()->is_Loop()) {
      Block* loop_head = b;
      assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
      Block* tail = _bbs[tail_n->_idx];

      // Defensively filter out Loop nodes for non-single-entry loops.
      // For all reasonable loops, the head occurs before the tail in RPO.
      if (i <= tail->_rpo) {

        // The tail and (recursive) predecessors of the tail
        // are made members of a new loop.

        assert(worklist.size() == 0, "nonempty worklist");
        CFGLoop* nloop = new CFGLoop(idct++);
        assert(loop_head->_loop == NULL, "just checking");
        loop_head->_loop = nloop;
        // Add to nloop so push_pred() will skip over inner loops
        nloop->add_member(loop_head);
        nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, _bbs);

        while (worklist.size() > 0) {
          Block* member = worklist.pop();
          if (member != loop_head) {
            for (uint j = 1; j < member->num_preds(); j++) {
              nloop->push_pred(member, j, worklist, _bbs);
            }
          }
        }
      }
    }
  }

  // Create a member list for each loop consisting
  // of both blocks and (immediate child) loops.
  for (uint i = 0; i < _num_blocks; i++) {
    Block *b = _blocks[i];
    CFGLoop* lp = b->_loop;
    if (lp == NULL) {
      // Not assigned to a loop. Add it to the method's pseudo loop.
      b->_loop = root_loop;
      lp = root_loop;
    }
    if (lp == root_loop || b != lp->head()) { // loop heads are already members
      lp->add_member(b);
    }
    if (lp != root_loop) {
      if (lp->parent() == NULL) {
        // Not a nested loop. Make it a child of the method's pseudo loop.
        root_loop->add_nested_loop(lp);
      }
      if (b == lp->head()) {
        // Add nested loop to member list of parent loop.
        lp->parent()->add_member(lp);
      }
    }
  }

  return root_loop;
}

//------------------------------push_pred--------------------------------------
void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk) {
  Node* pred_n = blk->pred(i);
  Block* pred = node_to_blk[pred_n->_idx];
  CFGLoop *pred_loop = pred->_loop;
  if (pred_loop == NULL) {
    // Filter out blocks for non-single-entry loops.
    // For all reasonable loops, the head occurs before the tail in RPO.
    if (pred->_rpo > head()->_rpo) {
      pred->_loop = this;
      worklist.push(pred);
    }
  } else if (pred_loop != this) {
    // Nested loop.
    while (pred_loop->_parent != NULL && pred_loop->_parent != this) {
      pred_loop = pred_loop->_parent;
    }
    // Make pred's loop be a child
    if (pred_loop->_parent == NULL) {
      add_nested_loop(pred_loop);
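      // (Illustrative note: the inner loop is absorbed as a single child
      // here; its body blocks are never pushed individually, so the outer
      // traversal treats the whole inner loop as one unit.)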
      Block* pred_head = pred_loop->head();
      assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      assert(pred_head != head(), "loop head in only one loop");
      push_pred(pred_head, LoopNode::EntryControl, worklist, node_to_blk);
    } else {
      assert(pred_loop->_parent == this && _parent == NULL, "just checking");
    }
  }
}

//------------------------------add_nested_loop--------------------------------
// Make cl a child of the current loop in the loop tree.
void CFGLoop::add_nested_loop(CFGLoop* cl) {
  assert(_parent == NULL, "no parent yet");
  assert(cl != this, "not my own parent");
  cl->_parent = this;
  CFGLoop* ch = _child;
  if (ch == NULL) {
    _child = cl;
  } else {
    while (ch->_sibling != NULL) { ch = ch->_sibling; }
    ch->_sibling = cl;
  }
}

//------------------------------compute_loop_depth-----------------------------
// Store the loop depth in each CFGLoop object.
// Recursively walk the children to do the same for them.
void CFGLoop::compute_loop_depth(int depth) {
  _depth = depth;
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->compute_loop_depth(depth + 1);
    ch = ch->_sibling;
  }
}

//------------------------------compute_freq-----------------------------------
// Compute the frequency of each block and loop, relative to a single entry
// into the dominating loop head.
void CFGLoop::compute_freq() {
  // Bottom-up traversal of the loop tree (visit inner loops first).
  // Set the loop head frequency to 1.0, then transitively
  // compute the frequency for all successors in the loop,
  // as well as for each exit edge. Inner loops are
  // treated as single blocks with loop exit targets
  // as the successor blocks.

  // Nested loops first.
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->compute_freq();
    ch = ch->_sibling;
  }
  assert(_members.length() > 0, "no empty loops");
  Block* hd = head();
  hd->_freq = 1.0f;
  for (int i = 0; i < _members.length(); i++) {
    CFGElement* s = _members.at(i);
    float freq = s->_freq;
    if (s->is_block()) {
      Block* b = s->as_Block();
      for (uint j = 0; j < b->_num_succs; j++) {
        Block* sb = b->_succs[j];
        update_succ_freq(sb, freq * b->succ_prob(j));
      }
    } else {
      CFGLoop* lp = s->as_CFGLoop();
      assert(lp->_parent == this, "immediate child");
      for (int k = 0; k < lp->_exits.length(); k++) {
        Block* eb = lp->_exits.at(k).get_target();
        float prob = lp->_exits.at(k).get_prob();
        update_succ_freq(eb, freq * prob);
      }
    }
  }

  // For all loops other than the outer, "method" loop,
  // sum and normalize the exit probabilities. The "method" loop
  // should keep the initial exit probability of 1, so that
  // inner blocks do not get erroneously scaled.
  if (_depth != 0) {
    // Total the exit probabilities for this loop.
    float exits_sum = 0.0f;
    for (int i = 0; i < _exits.length(); i++) {
      exits_sum += _exits.at(i).get_prob();
    }

    // Normalize the exit probabilities. Until now, the
    // probabilities estimated the chance of exit per
    // single loop iteration; afterward, they estimate
    // the probability of exit per loop entry.
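    //
    // Worked example (illustrative numbers only): a loop with two exits
    // whose per-iteration probabilities are 0.05 and 0.15 has an
    // exits_sum of 0.20, so the normalized per-entry probabilities become
    // 0.25 and 0.75, and the saved _exit_prob of 0.20 corresponds to an
    // expected trip count of roughly 1/0.20 = 5 iterations.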
    for (int i = 0; i < _exits.length(); i++) {
      Block* et = _exits.at(i).get_target();
      float new_prob = 0.0f;
      if (_exits.at(i).get_prob() > 0.0f) {
        new_prob = _exits.at(i).get_prob() / exits_sum;
      }
      BlockProbPair bpp(et, new_prob);
      _exits.at_put(i, bpp);
    }

    // Save the total, but guard against an unreasonable probability,
    // as the value is used to estimate the loop trip count.
    // An infinite trip count would blur relative block
    // frequencies.
    if (exits_sum > 1.0f) exits_sum = 1.0f;
    if (exits_sum < PROB_MIN) exits_sum = PROB_MIN;
    _exit_prob = exits_sum;
  }
}

//------------------------------succ_prob-------------------------------------
// Determine the probability of reaching successor 'i' from the receiver block.
float Block::succ_prob(uint i) {
  int eidx = end_idx();
  Node *n = _nodes[eidx]; // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // Can only reach here if called after lcm (local code motion). The
      // original Op_If is gone, so we attempt to infer the probability
      // from one or both of the successor blocks.
      assert(_num_succs == 2, "expecting 2 successors of a null check");
      // If either successor has only one predecessor, then the
      // probability estimate can be derived using the
      // relative frequency of the successor and this block.
      if (_succs[i]->num_preds() == 2) {
        return _succs[i]->_freq / _freq;
      } else if (_succs[1-i]->num_preds() == 2) {
        return 1 - (_succs[1-i]->_freq / _freq);
      } else {
        // Estimate using both successor frequencies.
        float freq = _succs[i]->_freq;
        return freq / (freq + _succs[1-i]->_freq);
      }
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type.
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If: {
    assert(i < 2, "just checking");
    // Conditionals pass on only part of their frequency.
    float prob = n->as_MachIf()->_prob;
    assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
    // If succ[i] is the FALSE branch, invert path info.
    if( _nodes[i + eidx + 1]->Opcode() == Op_IfFalse ) {
      return 1.0f - prob; // not taken
    } else {
      return prob; // taken
    }
  }

  case Op_Jump:
    // Divide the frequency between all successors evenly.
    return 1.0f/_num_succs;

  case Op_Catch: {
    const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
    if (ci->_con == CatchProjNode::fall_through_index) {
      // Fall-thru path gets the lion's share.
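      // (Illustrative arithmetic: PROB_UNLIKELY_MAG(5) is on the order of
      // 1e-5, so with, say, three successors the fall-through path keeps
      // roughly 1 - 3e-5 of the block's frequency while each exceptional
      // path below receives about 1e-5.)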
      return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
    } else {
      // Presume exceptional paths are equally unlikely.
      return PROB_UNLIKELY_MAG(5);
    }
  }

  case Op_Root:
  case Op_Goto:
    // Pass frequency straight thru to target.
    return 1.0f;

  case Op_NeverBranch:
    return 0.0f;

  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    // Do not push out freq to root block.
    return 0.0f;

  default:
    ShouldNotReachHere();
  }

  return 0.0f;
}

//------------------------------num_fall_throughs-----------------------------
// Return the number of fall-through candidates for a block.
int Block::num_fall_throughs() {
  int eidx = end_idx();
  Node *n = _nodes[eidx]; // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // In theory, either side can fall through; for simplicity's sake,
      // say that only the false branch can for now.
      return 1;
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type.
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If:
    return 2;

  case Op_Root:
  case Op_Goto:
    return 1;

  case Op_Catch: {
    for (uint i = 0; i < _num_succs; i++) {
      const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
      if (ci->_con == CatchProjNode::fall_through_index) {
        return 1;
      }
    }
    return 0;
  }

  case Op_Jump:
  case Op_NeverBranch:
  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    return 0;

  default:
    ShouldNotReachHere();
  }

  return 0;
}

//------------------------------succ_fall_through-----------------------------
// Return true if a specific successor could be a fall-through target.
bool Block::succ_fall_through(uint i) {
  int eidx = end_idx();
  Node *n = _nodes[eidx]; // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // In theory, either side can fall through; for simplicity's sake,
      // say that only the false branch can for now.
      return _nodes[i + eidx + 1]->Opcode() == Op_IfFalse;
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type.
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If:
  case Op_Root:
  case Op_Goto:
    return true;

  case Op_Catch: {
    const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
    return ci->_con == CatchProjNode::fall_through_index;
  }

  case Op_Jump:
  case Op_NeverBranch:
  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    return false;

  default:
    ShouldNotReachHere();
  }

  return false;
}

//------------------------------update_uncommon_branch------------------------
// Update the probability of a two-way branch so that the path to 'ub'
// becomes uncommon.
void Block::update_uncommon_branch(Block* ub) {
  int eidx = end_idx();
  Node *n = _nodes[eidx]; // Get ending Node

  int op = n->as_Mach()->ideal_Opcode();

  assert(op == Op_CountedLoopEnd || op == Op_If, "must be an If");
  assert(num_fall_throughs() == 2, "must be a two-way branch block");

  // Which successor is ub?
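  // (A linear scan suffices; the asserts above guarantee a two-way branch.)
  //
  // Illustrative trace of the clamping that follows (made-up probability):
  // if ub hangs off the false projection and the branch's _prob, i.e. the
  // probability of the true path, is 0.3, then inverting yields p = 0.7
  // for ub. Since 0.7 exceeds PROB_MIN, p is clamped to PROB_MIN and
  // inverted back, leaving _prob = 1 - PROB_MIN, so the path to ub is
  // almost never taken.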
  uint s;
  for (s = 0; s < _num_succs; s++) {
    if (_succs[s] == ub) break;
  }
  assert(s < 2, "uncommon successor must be found");

  // If ub is the true path, make the probability small; else ub is the
  // false path, and make the probability large.
  bool invert = (_nodes[s + eidx + 1]->Opcode() == Op_IfFalse);

  // Get the existing probability.
  float p = n->as_MachIf()->_prob;

  if (invert) p = 1.0f - p;
  if (p > PROB_MIN) {
    p = PROB_MIN;
  }
  if (invert) p = 1.0f - p;

  n->as_MachIf()->_prob = p;
}

//------------------------------update_succ_freq-------------------------------
// Update the appropriate frequency associated with block 'b', a successor of
// a block in this loop.
void CFGLoop::update_succ_freq(Block* b, float freq) {
  if (b->_loop == this) {
    if (b == head()) {
      // Back branch within the loop.
      // Do nothing now; the loop-carried frequency will be
      // adjusted later in scale_freq().
    } else {
      // Simple branch within the loop.
      b->_freq += freq;
    }
  } else if (!in_loop_nest(b)) {
    // The branch is an exit from this loop.
    BlockProbPair bpp(b, freq);
    _exits.append(bpp);
  } else {
    // Branch into a nested loop.
    CFGLoop* ch = b->_loop;
    ch->_freq += freq;
  }
}

//------------------------------in_loop_nest-----------------------------------
// Determine if block b is in the receiver's loop nest.
bool CFGLoop::in_loop_nest(Block* b) {
  int depth = _depth;
  CFGLoop* b_loop = b->_loop;
  int b_depth = b_loop->_depth;
  if (depth == b_depth) {
    return true;
  }
  while (b_depth > depth) {
    b_loop = b_loop->_parent;
    b_depth = b_loop->_depth;
  }
  return b_loop == this;
}

//------------------------------scale_freq-------------------------------------
// Scale the frequency of loops and blocks by trip counts from outer loops.
// Do a top-down traversal of the loop tree (visit outer loops first).
void CFGLoop::scale_freq() {
  float loop_freq = _freq * trip_count();
  for (int i = 0; i < _members.length(); i++) {
    CFGElement* s = _members.at(i);
    float block_freq = s->_freq * loop_freq;
    if (block_freq < MIN_BLOCK_FREQUENCY) block_freq = MIN_BLOCK_FREQUENCY;
    s->_freq = block_freq;
  }
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->scale_freq();
    ch = ch->_sibling;
  }
}

#ifndef PRODUCT
//------------------------------dump_tree--------------------------------------
void CFGLoop::dump_tree() const {
  dump();
  if (_child != NULL) _child->dump_tree();
  if (_sibling != NULL) _sibling->dump_tree();
}

//------------------------------dump-------------------------------------------
void CFGLoop::dump() const {
  for (int i = 0; i < _depth; i++) tty->print(" ");
  tty->print("%s: %d trip_count: %6.0f freq: %6.0f\n",
             _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq);
  for (int i = 0; i < _depth; i++) tty->print(" ");
  tty->print(" members:");
  int k = 0;
  for (int i = 0; i < _members.length(); i++) {
    if (k++ >= 6) {
      tty->print("\n ");
      for (int j = 0; j < _depth+1; j++) tty->print(" ");
      k = 0;
    }
    CFGElement *s = _members.at(i);
    if (s->is_block()) {
      Block *b = s->as_Block();
      tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq);
    } else {
      CFGLoop* lp = s->as_CFGLoop();
      tty->print(" L%d(%6.3f)", lp->_id, lp->_freq);
    }
  }
  tty->print("\n");
  for (int i = 0; i < _depth; i++) tty->print(" ");
  tty->print(" exits: ");
  k = 0;
  for (int i = 0; i < _exits.length(); i++) {
    if (k++ >= 7) {
      tty->print("\n ");
      for (int j = 0; j < _depth+1; j++) tty->print(" ");
      k = 0;
    }
    Block *blk = _exits.at(i).get_target();
    float prob = _exits.at(i).get_prob();
    tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
  }
  tty->print("\n");
}
#endif