#ifdef USE_PRAGMA_IDENT_SRC
#pragma ident "@(#)gcm.cpp 1.259 08/07/10 14:40:09 JVM"
#endif
/*
 * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

#include "incls/_precompiled.incl"
#include "incls/_gcm.cpp.incl"

//----------------------------schedule_node_into_block-------------------------
// Insert node n into block b.  Look for projections of n and make sure they
// are in b also.
void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
  // Set basic block of n; add n to b.
  _bbs.map(n->_idx, b);
  b->add_inst(n);

  // After Matching, nearly any old Node may have projections trailing it.
  // These are usually machine-dependent flags.  In any case, they might
  // float to another block below this one.  Move them up.
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* use = n->fast_out(i);
    if (use->is_Proj()) {
      Block* buse = _bbs[use->_idx];
      if (buse != b) {              // In wrong block?
        if (buse != NULL)
          buse->find_remove(use);   // Remove from wrong block
        _bbs.map(use->_idx, b);     // Re-insert in this block
        b->add_inst(use);
      }
    }
  }
}


//------------------------------schedule_pinned_nodes--------------------------
// Set the basic block for Nodes pinned into blocks
void PhaseCFG::schedule_pinned_nodes( VectorSet &visited ) {
  // Allocate node stack of size C->unique()+8 to avoid frequent realloc
  GrowableArray <Node *> spstack(C->unique()+8);
  spstack.push(_root);
  while ( spstack.is_nonempty() ) {
    Node *n = spstack.pop();
    if( !visited.test_set(n->_idx) ) { // Test node and flag it as visited
      if( n->pinned() && !_bbs.lookup(n->_idx) ) {  // Pinned?  Nail it down!
        Node *input = n->in(0);
        assert( input, "pinned Node must have Control" );
        while( !input->is_block_start() )
          input = input->in(0);
        Block *b = _bbs[input->_idx]; // Basic block of controlling input
        schedule_node_into_block(n, b);
      }
      for( int i = n->req() - 1; i >= 0; --i ) {  // For all inputs
        if( n->in(i) != NULL )
          spstack.push(n->in(i));
      }
    }
  }
}

#ifdef ASSERT
// Assert that new input b2 is dominated by all previous inputs.
// Check this by seeing that it is dominated by b1, the deepest
// input observed until b2.
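// For example, if the inputs seen so far lie on the dominator chain
// B2 -> B5 -> B9 (so b1 == B9, the deepest), a new input block b2 passes
// this check only if repeatedly following b2->_idom reaches B9.  If the
// walk falls off the top of the dominator tree instead, the inputs are
// not linearly ordered in the dom tree and no unique deepest input
// block exists.  (Block numbers here are illustrative only.)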
static void assert_dom(Block* b1, Block* b2, Node* n, Block_Array &bbs) {
  if (b1 == NULL)  return;
  assert(b1->_dom_depth < b2->_dom_depth, "sanity");
  Block* tmp = b2;
  while (tmp != b1 && tmp != NULL) {
    tmp = tmp->_idom;
  }
  if (tmp != b1) {
    // Detected an unschedulable graph.  Print some nice stuff and die.
    tty->print_cr("!!! Unschedulable graph !!!");
    for (uint j=0; j<n->len(); j++) { // For all inputs
      Node* inn = n->in(j); // Get input
      if (inn == NULL)  continue;  // Ignore NULL, missing inputs
      Block* inb = bbs[inn->_idx];
      tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
                 inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
      inn->dump();
    }
    tty->print("Failing node: ");
    n->dump();
    assert(false, "unschedulable graph");
  }
}
#endif

static Block* find_deepest_input(Node* n, Block_Array &bbs) {
  // Find the last input dominated by all other inputs.
  Block* deepb           = NULL;        // Deepest block so far
  int    deepb_dom_depth = 0;
  for (uint k = 0; k < n->len(); k++) { // For all inputs
    Node* inn = n->in(k);               // Get input
    if (inn == NULL)  continue;         // Ignore NULL, missing inputs
    Block* inb = bbs[inn->_idx];
    assert(inb != NULL, "must already have scheduled this input");
    if (deepb_dom_depth < (int) inb->_dom_depth) {
      // The new inb must be dominated by the previous deepb.
      // The various inputs must be linearly ordered in the dom
      // tree, or else there will not be a unique deepest block.
      DEBUG_ONLY(assert_dom(deepb, inb, n, bbs));
      deepb = inb;                      // Save deepest block
      deepb_dom_depth = deepb->_dom_depth;
    }
  }
  assert(deepb != NULL, "must be at least one input to n");
  return deepb;
}


//------------------------------schedule_early---------------------------------
// Find the earliest Block any instruction can be placed in.  Some instructions
// are pinned into Blocks.  Unpinned instructions can appear in the last block
// in which all their inputs occur.
bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) {
  // Allocate stack with enough space to avoid frequent realloc
  Node_Stack nstack(roots.Size() + 8); // (unique >> 1) + 24 from Java2D stats
  // roots.push(_root); _root will be processed among C->top() inputs
  roots.push(C->top());
  visited.set(C->top()->_idx);

  while (roots.size() != 0) {
    // Use local variables nstack_top_n & nstack_top_i to cache values
    // on stack's top.
    Node *nstack_top_n = roots.pop();
    uint  nstack_top_i = 0;
//while_nstack_nonempty:
    while (true) {
      // Get parent node and next input's index from stack's top.
      Node *n = nstack_top_n;
      uint  i = nstack_top_i;

      if (i == 0) {
        // Special control input processing.
        // While I am here, go ahead and look for Nodes which are taking control
        // from a is_block_proj Node.  After I inserted RegionNodes to make proper
        // blocks, the control at a is_block_proj more properly comes from the
        // Region being controlled by the block_proj Node.
        const Node *in0 = n->in(0);
        if (in0 != NULL) {              // Control-dependent?
          const Node *p = in0->is_block_proj();
          if (p != NULL && p != n) {    // Control from a block projection?
            // Find trailing Region
            Block *pb = _bbs[in0->_idx]; // Block-projection already has basic block
            uint j = 0;
            if (pb->_num_succs != 1) {  // More than 1 successor?
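              // The last _num_succs entries of pb->_nodes are the
              // block-ending projections, one per successor; scan that
              // tail for in0, and use its offset from 'start' (below)
              // to index the matching successor in pb->_succs.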
              // Search for successor
              uint max = pb->_nodes.size();
              assert( max > 1, "" );
              uint start = max - pb->_num_succs;
              // Find which output path belongs to projection
              for (j = start; j < max; j++) {
                if( pb->_nodes[j] == in0 )
                  break;
              }
              assert( j < max, "must find" );
              // Change control to match head of successor basic block
              j -= start;
            }
            n->set_req(0, pb->_succs[j]->head());
          }
        } else {               // n->in(0) == NULL
          if (n->req() == 1) { // This guy is a constant with NO inputs?
            n->set_req(0, _root);
          }
        }
      }

      // First, visit all inputs and force them to get a block.  If an
      // input is already in a block we quit following inputs (to avoid
      // cycles).  Instead we put that Node on a worklist to be handled
      // later (since ITS inputs may not have a block yet).
      bool done = true;       // Assume all n's inputs will be processed
      while (i < n->len()) {  // For all inputs
        Node *in = n->in(i);  // Get input
        ++i;
        if (in == NULL)  continue;   // Ignore NULL, missing inputs
        int is_visited = visited.test_set(in->_idx);
        if (!_bbs.lookup(in->_idx)) { // Missing block selection?
          if (is_visited) {
            // assert( !visited.test(in->_idx), "did not schedule early" );
            return false;
          }
          nstack.push(n, i);  // Save parent node and next input's index.
          nstack_top_n = in;  // Process current input now.
          nstack_top_i = 0;
          done = false;       // Not all n's inputs processed.
          break; // continue while_nstack_nonempty;
        } else if (!is_visited) { // Input not yet visited?
          roots.push(in);         // Visit this guy later, using worklist
        }
      }
      if (done) {
        // All of n's inputs have been processed, complete post-processing.

        // Some instructions are pinned into a block.  These include Region,
        // Phi, Start, Return, and other control-dependent instructions and
        // any projections which depend on them.
        if (!n->pinned()) {
          // Set earliest legal block.
          _bbs.map(n->_idx, find_deepest_input(n, _bbs));
        }

        if (nstack.is_empty()) {
          // Finished all nodes on stack.
          // Process next node on the worklist 'roots'.
          break;
        }
        // Get saved parent node and next input's index.
        nstack_top_n = nstack.node();
        nstack_top_i = nstack.index();
        nstack.pop();
      } // if (done)
    }   // while (true)
  }     // while (roots.size() != 0)
  return true;
}

//------------------------------dom_lca----------------------------------------
// Find least common ancestor in dominator tree
// LCA is a current notion of LCA, to be raised above 'this'.
// As a convenient boundary condition, return 'this' if LCA is NULL.
// Find the LCA of those two nodes.
Block* Block::dom_lca(Block* LCA) {
  if (LCA == NULL || LCA == this)  return this;

  Block* anc = this;
  while (anc->_dom_depth > LCA->_dom_depth)
    anc = anc->_idom;           // Walk up till anc is as high as LCA

  while (LCA->_dom_depth > anc->_dom_depth)
    LCA = LCA->_idom;           // Walk up till LCA is as high as anc

  while (LCA != anc) {          // Walk both up till they are the same
    LCA = LCA->_idom;
    anc = anc->_idom;
  }

  return LCA;
}

//--------------------------raise_LCA_above_use--------------------------------
// We are placing a definition, and have been given a def->use edge.
// The definition must dominate the use, so move the LCA upward in the
// dominator tree to dominate the use.
// If the use is a phi, adjust the LCA only with the phi input paths
// which actually use this def.
static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, Block_Array &bbs) {
  Block* buse = bbs[use->_idx];
  if (buse == NULL)    return LCA;   // Unused killing Projs have no use block
  if (!use->is_Phi())  return buse->dom_lca(LCA);
  uint pmax = use->req();       // Number of Phi inputs
  // Why does not this loop just break after finding the matching input to
  // the Phi?  Well...it's like this.  I do not have true def-use/use-def
  // chains.  Means I cannot distinguish, from the def-use direction, which
  // of many use-defs lead from the same use to the same def.  That is, this
  // Phi might have several uses of the same def.  Each use appears in a
  // different predecessor block.  But when I enter here, I cannot distinguish
  // which use-def edge I should find the predecessor block for.  So I find
  // them all.  Means I do a little extra work if a Phi uses the same value
  // more than once.
  for (uint j=1; j<pmax; j++) { // For all inputs
    if (use->in(j) == def) {    // Found matching input?
      Block* pred = bbs[buse->pred(j)->_idx];
      LCA = pred->dom_lca(LCA);
    }
  }
  return LCA;
}

//----------------------------raise_LCA_above_marks----------------------------
// Return a new LCA that dominates LCA and any of its marked predecessors.
// Search all my parents up to 'early' (exclusive), looking for predecessors
// which are marked with the given index.  Return the LCA (in the dom tree)
// of all marked blocks.  If there are none marked, return the original
// LCA.
static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark,
                                    Block* early, Block_Array &bbs) {
  Block_List worklist;
  worklist.push(LCA);
  while (worklist.size() > 0) {
    Block* mid = worklist.pop();
    if (mid == early)  continue;  // stop searching here

    // Test and set the visited bit.
    if (mid->raise_LCA_visited() == mark)  continue;  // already visited

    // Don't process the current LCA, otherwise the search may terminate early
    if (mid != LCA && mid->raise_LCA_mark() == mark) {
      // Raise the LCA.
      LCA = mid->dom_lca(LCA);
      if (LCA == early)  break;   // stop searching everywhere
      assert(early->dominates(LCA), "early is high enough");
      // Resume searching at that point, skipping intermediate levels.
      worklist.push(LCA);
      if (LCA == mid)
        continue; // Don't mark as visited to avoid early termination.
    } else {
      // Keep searching through this block's predecessors.
      for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
        Block* mid_parent = bbs[ mid->pred(j)->_idx ];
        worklist.push(mid_parent);
      }
    }
    mid->set_raise_LCA_visited(mark);
  }
  return LCA;
}

//--------------------------memory_early_block--------------------------------
// This is a variation of find_deepest_input, the heart of schedule_early.
// Find the "early" block for a load, if we considered only memory and
// address inputs, that is, if other data inputs were ignored.
//
// Because a subset of edges are considered, the resulting block will
// be earlier (at a shallower dom_depth) than the true schedule_early
// point of the node.  We compute this earlier block as a more permissive
// site for anti-dependency insertion, but only if subsume_loads is enabled.
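// For instance (a hypothetical shape): a load whose base and index are
// available in a shallow block B2, but whose other data inputs only
// become available in a deeper block B6, has schedule_early block B6;
// for anti-dependence purposes this routine reports B2 instead, widening
// the range of stores that will be examined.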
static Block* memory_early_block(Node* load, Block* early, Block_Array &bbs) {
  Node* base;
  Node* index;
  Node* store = load->in(MemNode::Memory);
  load->as_Mach()->memory_inputs(base, index);

  assert(base != NodeSentinel && index != NodeSentinel,
         "unexpected base/index inputs");

  Node* mem_inputs[4];
  int mem_inputs_length = 0;
  if (base != NULL)  mem_inputs[mem_inputs_length++] = base;
  if (index != NULL) mem_inputs[mem_inputs_length++] = index;
  if (store != NULL) mem_inputs[mem_inputs_length++] = store;

  // In the comparison below, add one to account for the control input,
  // which may be null, but always takes up a spot in the in array.
  if (mem_inputs_length + 1 < (int) load->req()) {
    // This "load" has more inputs than just the memory, base and index inputs.
    // For purposes of checking anti-dependences, we need to start
    // from the early block of only the address portion of the instruction,
    // and ignore other blocks that may have factored into the wider
    // schedule_early calculation.
    if (load->in(0) != NULL) mem_inputs[mem_inputs_length++] = load->in(0);

    Block* deepb           = NULL;        // Deepest block so far
    int    deepb_dom_depth = 0;
    for (int i = 0; i < mem_inputs_length; i++) {
      Block* inb = bbs[mem_inputs[i]->_idx];
      if (deepb_dom_depth < (int) inb->_dom_depth) {
        // The new inb must be dominated by the previous deepb.
        // The various inputs must be linearly ordered in the dom
        // tree, or else there will not be a unique deepest block.
        DEBUG_ONLY(assert_dom(deepb, inb, load, bbs));
        deepb = inb;                      // Save deepest block
        deepb_dom_depth = deepb->_dom_depth;
      }
    }
    early = deepb;
  }

  return early;
}

//--------------------------insert_anti_dependences---------------------------
// A load may need to witness memory that nearby stores can overwrite.
// For each nearby store, either insert an "anti-dependence" edge
// from the load to the store, or else move LCA upward to force the
// load to (eventually) be scheduled in a block above the store.
//
// Do not add edges to stores on distinct control-flow paths;
// only add edges to stores which might interfere.
//
// Return the (updated) LCA.  There will not be any possibly interfering
// store between the load's "early block" and the updated LCA.
// Any stores in the updated LCA will have new precedence edges
// back to the load.  The caller is expected to schedule the load
// in the LCA, in which case the precedence edges will make LCM
// preserve anti-dependences.  The caller may also hoist the load
// above the LCA, if it is not the early block.
Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
  assert(load->needs_anti_dependence_check(), "must be a load of some sort");
  assert(LCA != NULL, "");
  DEBUG_ONLY(Block* LCA_orig = LCA);

  // Compute the alias index.  Loads and stores with different alias indices
  // do not need anti-dependence edges.
  uint load_alias_idx = C->get_alias_index(load->adr_type());
#ifdef ASSERT
  if (load_alias_idx == Compile::AliasIdxBot && C->AliasLevel() > 0 &&
      (PrintOpto || VerifyAliases ||
       PrintMiscellaneous && (WizardMode || Verbose))) {
    // Load nodes should not consume all of memory.
    // Reporting a bottom type indicates a bug in adlc.
    // If some particular type of node validly consumes all of memory,
    // sharpen the preceding "if" to exclude it, so we can catch bugs here.
    tty->print_cr("*** Possible Anti-Dependence Bug:  Load consumes all of memory.");
    load->dump(2);
    if (VerifyAliases)  assert(load_alias_idx != Compile::AliasIdxBot, "");
  }
#endif
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrComp),
         "String compare is only known 'load' that does not conflict with any stores");

  if (!C->alias_type(load_alias_idx)->is_rewritable()) {
    // It is impossible to spoil this load by putting stores before it,
    // because we know that the stores will never update the value
    // which 'load' must witness.
    return LCA;
  }

  node_idx_t load_index = load->_idx;

  // Note the earliest legal placement of 'load', as determined
  // by the unique point in the dom tree where all memory effects
  // and other inputs are first available.  (Computed by schedule_early.)
  // For normal loads, 'early' is the shallowest place (dom graph wise)
  // to look for anti-deps between this load and any store.
  Block* early = _bbs[load_index];

  // If we are subsuming loads, compute an "early" block that only considers
  // memory or address inputs.  This block may be different than the
  // schedule_early block in that it could be at an even shallower depth in the
  // dominator tree, and allow for a broader discovery of anti-dependences.
  if (C->subsume_loads()) {
    early = memory_early_block(load, early, _bbs);
  }

  ResourceArea *area = Thread::current()->resource_area();
  Node_List worklist_mem(area);     // prior memory state to store
  Node_List worklist_store(area);   // possible-def to explore
  Node_List non_early_stores(area); // all relevant stores outside of early
  bool must_raise_LCA = false;
  DEBUG_ONLY(VectorSet should_not_repeat(area));

#ifdef TRACK_PHI_INPUTS
  // %%% This extra checking fails because MergeMem nodes are not GVNed.
  // Provide "phi_inputs" to check if every input to a PhiNode is from the
  // original memory state.  This indicates a PhiNode which should not
  // prevent the load from sinking.  For such a block, set_raise_LCA_mark
  // may be overly conservative.
  // Mechanism: count inputs seen for each Phi encountered in worklist_store.
  DEBUG_ONLY(GrowableArray<uint> phi_inputs(area, C->unique(),0,0));
#endif

  // 'load' uses some memory state; look for users of the same state.
  // Recurse through MergeMem nodes to the stores that use them.

  // Each of these stores is a possible definition of memory
  // that 'load' needs to use.  We need to force 'load'
  // to occur before each such store.  When the store is in
  // the same block as 'load', we insert an anti-dependence
  // edge load->store.

  // The relevant stores "nearby" the load consist of a tree rooted
  // at initial_mem, with internal nodes of type MergeMem.
  // Therefore, the branches visited by the worklist are of this form:
  //    initial_mem -> (MergeMem ->)* store
  // The anti-dependence constraints apply only to the fringe of this tree.

  Node* initial_mem = load->in(MemNode::Memory);
  worklist_store.push(initial_mem);
  worklist_mem.push(NULL);
  DEBUG_ONLY(should_not_repeat.test_set(initial_mem->_idx));
  while (worklist_store.size() > 0) {
    // Examine a nearby store to see if it might interfere with our load.
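    // The two worklists advance in lock-step: worklist_mem.at(k) holds
    // the prior memory state consumed by the candidate at
    // worklist_store.at(k), so 'mem' below is the state which 'store'
    // might overwrite.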
    Node* mem   = worklist_mem.pop();
    Node* store = worklist_store.pop();
    uint op = store->Opcode();

    // MergeMems do not directly have anti-deps.
    // Treat them as internal nodes in a forward tree of memory states,
    // the leaves of which are each a 'possible-def'.
    if (store == initial_mem   // root (exclusive) of tree we are searching
        || op == Op_MergeMem   // internal node of tree we are searching
        ) {
      mem = store;   // It's not a possibly interfering store.
      for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
        store = mem->fast_out(i);
        if (store->is_MergeMem()) {
          // Be sure we don't get into combinatorial problems.
          // (Allow phis to be repeated; they can merge two relevant states.)
          uint i = worklist_store.size();
          for (; i > 0; i--) {
            if (worklist_store.at(i-1) == store)  break;
          }
          if (i > 0)  continue; // already on work list; do not repeat
          DEBUG_ONLY(int repeated = should_not_repeat.test_set(store->_idx));
          assert(!repeated, "do not walk merges twice");
        }
        worklist_mem.push(mem);
        worklist_store.push(store);
      }
      continue;
    }

    if (op == Op_MachProj || op == Op_Catch)   continue;
    if (store->needs_anti_dependence_check())  continue;  // not really a store

    // Compute the alias index.  Loads and stores with different alias
    // indices do not need anti-dependence edges.  Wide MemBar's are
    // anti-dependent on everything (except immutable memories).
    const TypePtr* adr_type = store->adr_type();
    if (!C->can_alias(adr_type, load_alias_idx))  continue;

    // Most slow-path runtime calls do NOT modify Java memory, but
    // they can block and so write Raw memory.
    if (store->is_Mach()) {
      MachNode* mstore = store->as_Mach();
      if (load_alias_idx != Compile::AliasIdxRaw) {
        // Check for call into the runtime using the Java calling
        // convention (and from there into a wrapper); it has no
        // _method.  Can't do this optimization for Native calls because
        // they CAN write to Java memory.
        if (mstore->ideal_Opcode() == Op_CallStaticJava) {
          assert(mstore->is_MachSafePoint(), "");
          MachSafePointNode* ms = (MachSafePointNode*) mstore;
          assert(ms->is_MachCallJava(), "");
          MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
          if (mcj->_method == NULL) {
            // These runtime calls do not write to Java visible memory
            // (other than Raw) and so do not require anti-dependence edges.
            continue;
          }
        }
        // Same for SafePoints: they read/write Raw but only read otherwise.
        // This is basically a workaround for SafePoints only defining control
        // instead of control + memory.
        if (mstore->ideal_Opcode() == Op_SafePoint)
          continue;
      } else {
        // Some raw memory, such as the load of "top" at an allocation,
        // can be control dependent on the previous safepoint.  See
        // comments in GraphKit::allocate_heap() about control input.
        // Inserting an anti-dep between such a safepoint and a use
        // creates a cycle, and will cause a subsequent failure in
        // local scheduling.  (BugId 4919904)
        // (%%% How can a control input be a safepoint and not a projection??)
        if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore)
          continue;
      }
    }

    // Identify a block that the current load must be above,
    // or else observe that 'store' is all the way up in the
    // earliest legal block for 'load'.  In the latter case,
    // immediately insert an anti-dependence edge.
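    // Three cases follow: 'store' is a Phi merging the load's memory
    // state (mark the relevant predecessor blocks), 'store' lies in some
    // block other than 'early' (mark its block and decide later), or
    // 'store' lies in 'early' itself (insert the anti-dependence edge
    // immediately).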
    Block* store_block = _bbs[store->_idx];
    assert(store_block != NULL, "unused killing projections skipped above");

    if (store->is_Phi()) {
      // 'load' uses memory which is one (or more) of the Phi's inputs.
      // It must be scheduled not before the Phi, but rather before
      // each of the relevant Phi inputs.
      //
      // Instead of finding the LCA of all inputs to a Phi that match 'mem',
      // we mark each corresponding predecessor block and do a combined
      // hoisting operation later (raise_LCA_above_marks).
      //
      // Do not assert(store_block != early, "Phi merging memory after access")
      // PhiNode may be at start of block 'early' with backedge to 'early'
      DEBUG_ONLY(bool found_match = false);
      for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
        if (store->in(j) == mem) {   // Found matching input?
          DEBUG_ONLY(found_match = true);
          Block* pred_block = _bbs[store_block->pred(j)->_idx];
          if (pred_block != early) {
            // If any predecessor of the Phi matches the load's "early block",
            // we do not need a precedence edge between the Phi and 'load'
            // since the load will be forced into a block preceding the Phi.
            pred_block->set_raise_LCA_mark(load_index);
            assert(!LCA_orig->dominates(pred_block) ||
                   early->dominates(pred_block), "early is high enough");
            must_raise_LCA = true;
          }
        }
      }
      assert(found_match, "no worklist bug");
#ifdef TRACK_PHI_INPUTS
#ifdef ASSERT
      // This assert asks about correct handling of PhiNodes, which may not
      // have all input edges directly from 'mem'.  See BugId 4621264
      int num_mem_inputs = phi_inputs.at_grow(store->_idx,0) + 1;
      // Increment by exactly one even if there are multiple copies of 'mem'
      // coming into the phi, because we will run this block several times
      // if there are several copies of 'mem'.  (That's how DU iterators work.)
      phi_inputs.at_put(store->_idx, num_mem_inputs);
      assert(PhiNode::Input + num_mem_inputs < store->req(),
             "Expect at least one phi input will not be from original memory state");
#endif //ASSERT
#endif //TRACK_PHI_INPUTS
    } else if (store_block != early) {
      // 'store' is between the current LCA and earliest possible block.
      // Label its block, and decide later on how to raise the LCA
      // to include the effect on LCA of this store.
      // If this store's block gets chosen as the raised LCA, we
      // will find him on the non_early_stores list and stick him
      // with a precedence edge.
      // (But, don't bother if LCA is already raised all the way.)
      if (LCA != early) {
        store_block->set_raise_LCA_mark(load_index);
        must_raise_LCA = true;
        non_early_stores.push(store);
      }
    } else {
      // Found a possibly-interfering store in the load's 'early' block.
      // This means 'load' cannot sink at all in the dominator tree.
      // Add an anti-dep edge, and squeeze 'load' into the highest block.
      assert(store != load->in(0), "dependence cycle found");
      if (verify) {
        assert(store->find_edge(load) != -1, "missing precedence edge");
      } else {
        store->add_prec(load);
      }
      LCA = early;
      // This turns off the process of gathering non_early_stores.
    }
  }
  // (Worklist is now empty; all nearby stores have been visited.)

  // Finished if 'load' must be scheduled in its 'early' block.
  // If we found any stores there, they have already been given
  // precedence edges.
  if (LCA == early)  return LCA;

  // We get here only if there are no possibly-interfering stores
  // in the load's 'early' block.  Move LCA up above all predecessors
  // which contain stores we have noted.
  //
  // The raised LCA block can be a home to such interfering stores,
  // but its predecessors must not contain any such stores.
  //
  // The raised LCA will be a lower bound for placing the load,
  // preventing the load from sinking past any block containing
  // a store that may invalidate the memory state required by 'load'.
  if (must_raise_LCA)
    LCA = raise_LCA_above_marks(LCA, load->_idx, early, _bbs);
  if (LCA == early)  return LCA;

  // Insert anti-dependence edges from 'load' to each store
  // in the non-early LCA block.
  // Mine the non_early_stores list for such stores.
  if (LCA->raise_LCA_mark() == load_index) {
    while (non_early_stores.size() > 0) {
      Node* store = non_early_stores.pop();
      Block* store_block = _bbs[store->_idx];
      if (store_block == LCA) {
        // add anti_dependence from store to load in its own block
        assert(store != load->in(0), "dependence cycle found");
        if (verify) {
          assert(store->find_edge(load) != -1, "missing precedence edge");
        } else {
          store->add_prec(load);
        }
      } else {
        assert(store_block->raise_LCA_mark() == load_index, "block was marked");
        // Any other stores we found must be either inside the new LCA
        // or else outside the original LCA.  In the latter case, they
        // did not interfere with any use of 'load'.
        assert(LCA->dominates(store_block)
               || !LCA_orig->dominates(store_block), "no stray stores");
      }
    }
  }

  // Return the highest block containing stores; any stores
  // within that block have been given anti-dependence edges.
  return LCA;
}

// This class is used to iterate backwards over the nodes in the graph.

class Node_Backward_Iterator {

private:
  Node_Backward_Iterator();

public:
  // Constructor for the iterator
  Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, Block_Array &bbs);

  // Postincrement operator to iterate over the nodes
  Node *next();

private:
  VectorSet   &_visited;
  Node_List   &_stack;
  Block_Array &_bbs;
};

// Constructor for the Node_Backward_Iterator
Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, Block_Array &bbs )
  : _visited(visited), _stack(stack), _bbs(bbs) {
  // The stack should contain exactly the root
  stack.clear();
  stack.push(root);

  // Clear the visited bits
  visited.Clear();
}

// Iterator for the Node_Backward_Iterator
Node *Node_Backward_Iterator::next() {

  // If the _stack is empty, then just return NULL: finished.
  if ( !_stack.size() )
    return NULL;

  // '_stack' is emulating a real _stack.  The 'visit-all-users' loop has been
  // made stateless, so I do not need to record the index 'i' on my _stack.
  // Instead I visit all users each time, scanning for unvisited users.
  // I visit unvisited not-anti-dependence users first, then anti-dependent
  // children next.
  Node *self = _stack.pop();

  // I cycle here when I am entering a deeper level of recursion.
  // The key variable 'self' was set prior to jumping here.
  while( 1 ) {

    _visited.set(self->_idx);

    // Now schedule all uses as late as possible.
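    // Block RPO numbers are used below to recognize backedges: a use
    // whose block has a smaller RPO than the def's block can only be
    // reached through a loop backedge, so such edges are not followed.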
    uint src     = self->is_Proj() ? self->in(0)->_idx : self->_idx;
    uint src_rpo = _bbs[src]->_rpo;

    // Schedule all nodes in a post-order visit
    Node *unvisited = NULL;  // Unvisited anti-dependent Node, if any

    // Scan for unvisited nodes
    for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
      // For all uses, schedule late
      Node* n = self->fast_out(i); // Use

      // Skip already visited children
      if ( _visited.test(n->_idx) )
        continue;

      // do not traverse backward control edges
      Node *use = n->is_Proj() ? n->in(0) : n;
      uint use_rpo = _bbs[use->_idx]->_rpo;

      if ( use_rpo < src_rpo )
        continue;

      // Phi nodes always precede uses in a basic block
      if ( use_rpo == src_rpo && use->is_Phi() )
        continue;

      unvisited = n;      // Found unvisited

      // Check for possible-anti-dependent
      if( !n->needs_anti_dependence_check() )
        break;            // Not visited, not anti-dep; schedule it NOW
    }

    // Did I find an unvisited not-anti-dependent Node?
    if ( !unvisited )
      break;              // All done with children; post-visit 'self'

    // Visit the unvisited Node.  Contains the obvious push to
    // indicate I'm entering a deeper level of recursion.  I push the
    // old state onto the _stack and set a new state and loop (recurse).
    _stack.push(self);
    self = unvisited;
  } // End recursion loop

  return self;
}

//------------------------------ComputeLatenciesBackwards----------------------
// Compute the latency of all the instructions.
void PhaseCFG::ComputeLatenciesBackwards(VectorSet &visited, Node_List &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- ComputeLatenciesBackwards ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs);
  Node *n;

  // Walk over all the nodes from last to first
  while (n = iter.next()) {
    // Set the latency for the definitions of this instruction
    partial_latency_of_defs(n);
  }
} // end ComputeLatenciesBackwards

//------------------------------partial_latency_of_defs------------------------
// Compute the latency impact of this node on all defs.  This computes
// a number that increases as we approach the beginning of the routine.
void PhaseCFG::partial_latency_of_defs(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_to_inputs: node_latency[%d] = %d for node",
               n->_idx, _node_latency.at_grow(n->_idx));
    dump();
  }
#endif

  if (n->is_Proj())
    n = n->in(0);

  if (n->is_Root())
    return;

  uint nlen = n->len();
  uint use_latency = _node_latency.at_grow(n->_idx);
  uint use_pre_order = _bbs[n->_idx]->_pre_order;

  for ( uint j=0; j<nlen; j++ ) {
    Node *def = n->in(j);

    if (!def || def == n)
      continue;

    // Walk backwards thru projections
    if (def->is_Proj())
      def = def->in(0);

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("#    in(%2d): ", j);
      def->dump();
    }
#endif

    // If the defining block is not known, assume it is ok
    Block *def_block = _bbs[def->_idx];
    uint def_pre_order = def_block ?
      def_block->_pre_order : 0;

    if ( (use_pre_order <  def_pre_order) ||
         (use_pre_order == def_pre_order && n->is_Phi()) )
      continue;

    uint delta_latency = n->latency(j);
    uint current_latency = delta_latency + use_latency;

    if (_node_latency.at_grow(def->_idx) < current_latency) {
      _node_latency.at_put_grow(def->_idx, current_latency);
    }

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d",
                    use_latency, j, delta_latency, current_latency, def->_idx,
                    _node_latency.at_grow(def->_idx));
    }
#endif
  }
}

//------------------------------latency_from_use-------------------------------
// Compute the latency of a specific use
int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
  // If self-reference, return no latency
  if (use == n || use->is_Root())
    return 0;

  uint def_pre_order = _bbs[def->_idx]->_pre_order;
  uint latency = 0;

  // If the use is not a projection, then it is simple...
  if (!use->is_Proj()) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("#    out(): ");
      use->dump();
    }
#endif

    uint use_pre_order = _bbs[use->_idx]->_pre_order;

    if (use_pre_order < def_pre_order)
      return 0;

    if (use_pre_order == def_pre_order && use->is_Phi())
      return 0;

    uint nlen = use->len();
    uint nl = _node_latency.at_grow(use->_idx);

    for ( uint j=0; j<nlen; j++ ) {
      if (use->in(j) == n) {
        // Change this if we want local latencies
        uint ul = use->latency(j);
        uint  l = ul + nl;
        if (latency < l) latency = l;
#ifndef PRODUCT
        if (trace_opto_pipelining()) {
          tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, latency = %d",
                        nl, j, ul, l, latency);
        }
#endif
      }
    }
  } else {
    // This is a projection, just grab the latency of the use(s)
    for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
      uint l = latency_from_use(use, def, use->fast_out(j));
      if (latency < l) latency = l;
    }
  }

  return latency;
}

//------------------------------latency_from_uses------------------------------
// Compute the latency of this instruction relative to all of its uses.
// This computes a number that increases as we approach the beginning of the
// routine.
void PhaseCFG::latency_from_uses(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_from_outputs: node_latency[%d] = %d for node",
               n->_idx, _node_latency.at_grow(n->_idx));
    dump();
  }
#endif
  uint latency=0;
  const Node *def = n->is_Proj() ? n->in(0): n;

  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    uint l = latency_from_use(n, def, n->fast_out(i));

    if (latency < l) latency = l;
  }

  _node_latency.at_put_grow(n->_idx, latency);
}

//------------------------------hoist_to_cheaper_block-------------------------
// Pick a block for node self, between early and LCA, that is a cheaper
// alternative to LCA.
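// Roughly: walking up the dominator tree from the LCA, a candidate block
// is preferred when it is strictly less frequent than the best found so
// far, or, when the current best does not already hide the node's
// latency, when it is not appreciably more frequent and does hide it.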
Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
  const double delta = 1+PROB_UNLIKELY_MAG(4);
  Block* least       = LCA;
  double least_freq  = least->_freq;
  uint target        = _node_latency.at_grow(self->_idx);
  uint start_latency = _node_latency.at_grow(LCA->_nodes[0]->_idx);
  uint end_latency   = _node_latency.at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
  bool in_latency    = (target <= start_latency);
  const Block* root_block = _bbs[_root->_idx];

  // Turn off latency scheduling if scheduling is just plain off
  if (!C->do_scheduling())
    in_latency = true;

  // Do not hoist (to cover latency) instructions which target a
  // single register.  Hoisting stretches the live range of the
  // single register and may force spilling.
  MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
  if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty())
    in_latency = true;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# Find cheaper block for latency %d: ",
               _node_latency.at_grow(self->_idx));
    self->dump();
    tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
                  LCA->_pre_order,
                  LCA->_nodes[0]->_idx,
                  start_latency,
                  LCA->_nodes[LCA->end_idx()]->_idx,
                  end_latency,
                  least_freq);
  }
#endif

  // Walk up the dominator tree from LCA (Lowest common ancestor) to
  // the earliest legal location.  Capture the least execution frequency.
  while (LCA != early) {
    LCA = LCA->_idom;         // Follow up the dominator tree

    if (LCA == NULL) {
      // Bailout without retry
      C->record_method_not_compilable("late schedule failed: LCA == NULL");
      return least;
    }

    // Don't hoist machine instructions to the root basic block
    if (mach && LCA == root_block)
      break;

    uint start_lat = _node_latency.at_grow(LCA->_nodes[0]->_idx);
    uint end_idx   = LCA->end_idx();
    uint end_lat   = _node_latency.at_grow(LCA->_nodes[end_idx]->_idx);
    double LCA_freq = LCA->_freq;
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
                    LCA->_pre_order, LCA->_nodes[0]->_idx, start_lat, end_idx, end_lat, LCA_freq);
    }
#endif
    if (LCA_freq < least_freq              || // Better Frequency
        ( !in_latency                   &&    // No block containing latency
          LCA_freq < least_freq * delta &&    // No worse frequency
          target >= end_lat             &&    // within latency range
          !self->is_iteratively_computed() )  // But don't hoist IV increments
             // because they may end up above other uses of their phi forcing
             // their result register to be different from their input.
       ) {
      least = LCA;            // Found cheaper block
      least_freq = LCA_freq;
      start_latency = start_lat;
      end_latency = end_lat;
      if (target <= start_lat)
        in_latency = true;
    }
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print_cr("# Choose block B%d with start latency=%d and freq=%g",
                  least->_pre_order, start_latency, least_freq);
  }
#endif

  // See if the latency needs to be updated
  if (target < end_latency) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
    }
#endif
    _node_latency.at_put_grow(self->_idx, end_latency);
    partial_latency_of_defs(self);
  }

  return least;
}


//------------------------------schedule_late-----------------------------------
// Now schedule all codes as LATE as possible.  This is the LCA in the
// dominator tree of all USES of a value.  Pick the block with the least
// loop nesting depth that is lowest in the dominator tree.
extern const char must_clone[];
void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- schedule_late ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs);
  Node *self;

  // Walk over all the nodes from last to first
  while (self = iter.next()) {
    Block* early = _bbs[self->_idx];   // Earliest legal placement

    if (self->is_top()) {
      // Top node goes in bb #2 with other constants.
      // It must be special-cased, because it has no out edges.
      early->add_inst(self);
      continue;
    }

    // No uses, just terminate
    if (self->outcnt() == 0) {
      assert(self->Opcode() == Op_MachProj, "sanity");
      continue;                   // Must be a dead machine projection
    }

    // If node is pinned in the block, then no scheduling can be done.
    if( self->pinned() )          // Pinned in block?
      continue;

    MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
    if (mach) {
      switch (mach->ideal_Opcode()) {
      case Op_CreateEx:
        // Don't move exception creation
        early->add_inst(self);
        continue;
        break;
      case Op_CheckCastPP:
        // Don't move CheckCastPP nodes away from their input, if the input
        // is a rawptr (5071820).
        Node *def = self->in(1);
        if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
          early->add_inst(self);
          continue;
        }
        break;
      }
    }

    // Gather LCA of all uses
    Block *LCA = NULL;
    {
      for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
        // For all uses, find LCA
        Node* use = self->fast_out(i);
        LCA = raise_LCA_above_use(LCA, use, self, _bbs);
      }
    }  // (Hide defs of imax, i from rest of block.)

    // Place temps in the block of their use.  This isn't a
    // requirement for correctness but it reduces useless
    // interference between temps and other nodes.
    if (mach != NULL && mach->is_MachTemp()) {
      _bbs.map(self->_idx, LCA);
      LCA->add_inst(self);
      continue;
    }

    // Check if 'self' could be anti-dependent on memory
    if (self->needs_anti_dependence_check()) {
      // Hoist LCA above possible-defs and insert anti-dependences to
      // defs in new LCA block.
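      // (This call may raise LCA as far as 'early' itself, in which
      // case the node cannot be hoisted at all.)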
      LCA = insert_anti_dependences(LCA, self);
    }

    if (early->_dom_depth > LCA->_dom_depth) {
      // Somehow the LCA has moved above the earliest legal point.
      // (One way this can happen is via memory_early_block.)
      if (C->subsume_loads() == true && !C->failing()) {
        // Retry with subsume_loads == false
        // If this is the first failure, the sentinel string will "stick"
        // to the Compile object, and the C2Compiler will see it and retry.
        C->record_failure(C2Compiler::retry_no_subsuming_loads());
      } else {
        // Bailout without retry when (early->_dom_depth > LCA->_dom_depth)
        C->record_method_not_compilable("late schedule failed: incorrect graph");
      }
      return;
    }

    // If there is no opportunity to hoist, then we're done.
    bool try_to_hoist = (LCA != early);

    // Must clone guys stay next to use; no hoisting allowed.
    // Also cannot hoist guys that alter memory or are otherwise not
    // allocatable (hoisting can make a value live longer, leading to
    // anti and output dependency problems which are normally resolved
    // by the register allocator giving everyone a different register).
    if (mach != NULL && must_clone[mach->ideal_Opcode()])
      try_to_hoist = false;

    Block* late = NULL;
    if (try_to_hoist) {
      // Now find the block with the least execution frequency.
      // Start at the latest schedule and work up to the earliest schedule
      // in the dominator tree.  Thus the Node will dominate all its uses.
      late = hoist_to_cheaper_block(LCA, early, self);
    } else {
      // Just use the LCA of the uses.
      late = LCA;
    }

    // Put the node into target block
    schedule_node_into_block(self, late);

#ifdef ASSERT
    if (self->needs_anti_dependence_check()) {
      // since precedence edges are only inserted when we're sure they
      // are needed make sure that after placement in a block we don't
      // need any new precedence edges.
      verify_anti_dependences(late, self);
    }
#endif
  } // Loop until all nodes have been visited

} // end ScheduleLate

//------------------------------GlobalCodeMotion-------------------------------
void PhaseCFG::GlobalCodeMotion( Matcher &matcher, uint unique, Node_List &proj_list ) {
  ResourceMark rm;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Start GlobalCodeMotion ----\n");
  }
#endif

  // Initialize the bbs.map for things on the proj_list
  uint i;
  for( i=0; i < proj_list.size(); i++ )
    _bbs.map(proj_list[i]->_idx, NULL);

  // Set the basic block for Nodes pinned into blocks
  Arena *a = Thread::current()->resource_area();
  VectorSet visited(a);
  schedule_pinned_nodes( visited );

  // Find the earliest Block any instruction can be placed in.  Some
  // instructions are pinned into Blocks.  Unpinned instructions can
  // appear in the last block in which all their inputs occur.
  visited.Clear();
  Node_List stack(a);
  stack.map( (unique >> 1) + 16, NULL); // Pre-grow the list
  if (!schedule_early(visited, stack)) {
    // Bailout without retry
    C->record_method_not_compilable("early schedule failed");
    return;
  }

  // Build Def-Use edges.
  proj_list.push(_root);        // Add real root as another root
  proj_list.pop();

  // Compute the latency information (via backwards walk) for all the
  // instructions in the graph
  GrowableArray<uint> node_latency;
  _node_latency = node_latency;

  if( C->do_scheduling() )
    ComputeLatenciesBackwards(visited, stack);

  // Now schedule all codes as LATE as possible.  This is the LCA in the
  // dominator tree of all USES of a value.  Pick the block with the least
  // loop nesting depth that is lowest in the dominator tree.
  // ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() )
  schedule_late(visited, stack);
  if( C->failing() ) {
    // schedule_late fails only when graph is incorrect.
    assert(!VerifyGraphEdges, "verification should have failed");
    return;
  }

  unique = C->unique();

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Detect implicit null checks ----\n");
  }
#endif

  // Detect implicit-null-check opportunities.  Basically, find NULL checks
  // with suitable memory ops nearby.  Use the memory op to do the NULL check.
  // I can generate a memory op if there is not one nearby.
  if (C->is_method_compilation()) {
    // Don't do it for natives, adapters, or runtime stubs
    int allowed_reasons = 0;
    // ...and don't do it when there have been too many traps, globally.
    for (int reason = (int)Deoptimization::Reason_none+1;
         reason < Compile::trapHistLength; reason++) {
      assert(reason < BitsPerInt, "recode bit map");
      if (!C->too_many_traps((Deoptimization::DeoptReason) reason))
        allowed_reasons |= nth_bit(reason);
    }
    // By reversing the loop direction we get a very minor gain on mpegaudio.
    // Feel free to revert to a forward loop for clarity.
    // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
    for( int i= matcher._null_check_tests.size()-2; i>=0; i-=2 ) {
      Node *proj = matcher._null_check_tests[i  ];
      Node *val  = matcher._null_check_tests[i+1];
      _bbs[proj->_idx]->implicit_null_check(this, proj, val, allowed_reasons);
      // The implicit_null_check will only perform the transformation
      // if the null branch is truly uncommon, *and* it leads to an
      // uncommon trap.  Combined with the too_many_traps guards
      // above, this prevents SEGV storms reported in 6366351,
      // by recompiling offending methods without this optimization.
    }
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Start Local Scheduling ----\n");
  }
#endif

  // Schedule locally.  Right now a simple topological sort.
  // Later, do a real latency aware scheduler.
  int *ready_cnt = NEW_RESOURCE_ARRAY(int,C->unique());
  memset( ready_cnt, -1, C->unique() * sizeof(int) );
  visited.Clear();
  for (i = 0; i < _num_blocks; i++) {
    if (!_blocks[i]->schedule_local(this, matcher, ready_cnt, visited)) {
      if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
        C->record_method_not_compilable("local schedule failed");
      }
      return;
    }
  }

  // If we inserted any instructions between a Call and his CatchNode,
  // clone the instructions on all paths below the Catch.
  for( i=0; i < _num_blocks; i++ )
    _blocks[i]->call_catch_cleanup(_bbs);

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- After GlobalCodeMotion ----\n");
    for (uint i = 0; i < _num_blocks; i++) {
      _blocks[i]->dump();
    }
  }
#endif
}


//------------------------------Estimate_Block_Frequency-----------------------
// Estimate block frequencies based on IfNode probabilities.
void PhaseCFG::Estimate_Block_Frequency() {
  int cnts = C->method() ? C->method()->interpreter_invocation_count() : 1;
  // Most of our algorithms will die horribly if frequency can become
  // negative so make sure cnts is a sane value.
  if( cnts <= 0 ) cnts = 1;
  float f = (float)cnts/(float)FreqCountInvocations;

  // Create the loop tree and calculate loop depth.
  _root_loop = create_loop_tree();
  _root_loop->compute_loop_depth(0);

  // Compute block frequency of each block, relative to a single loop entry.
  _root_loop->compute_freq();

  // Adjust all frequencies to be relative to a single method entry
  _root_loop->_freq = f * 1.0;
  _root_loop->scale_freq();

  // force paths ending at uncommon traps to be infrequent
  Block_List worklist;
  Block* root_blk = _blocks[0];
  for (uint i = 0; i < root_blk->num_preds(); i++) {
    Block *pb = _bbs[root_blk->pred(i)->_idx];
    if (pb->has_uncommon_code()) {
      worklist.push(pb);
    }
  }
  while (worklist.size() > 0) {
    Block* uct = worklist.pop();
    uct->_freq = PROB_MIN;
    for (uint i = 0; i < uct->num_preds(); i++) {
      Block *pb = _bbs[uct->pred(i)->_idx];
      if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
        worklist.push(pb);
      }
    }
  }

#ifndef PRODUCT
  if (PrintCFGBlockFreq) {
    tty->print_cr("CFG Block Frequencies");
    _root_loop->dump_tree();
    if (Verbose) {
      tty->print_cr("PhaseCFG dump");
      dump();
      tty->print_cr("Node dump");
      _root->dump(99999);
    }
  }
#endif
}

//----------------------------create_loop_tree--------------------------------
// Create a loop tree from the CFG
CFGLoop* PhaseCFG::create_loop_tree() {

#ifdef ASSERT
  assert( _blocks[0] == _broot, "" );
  for (uint i = 0; i < _num_blocks; i++ ) {
    Block *b = _blocks[i];
    // Check that the _loop fields are clear...we could clear them if not.
    assert(b->_loop == NULL, "clear _loop expected");
    // Sanity check that the RPO numbering is reflected in the _blocks array.
    // It doesn't have to be for the loop tree to be built, but if it is not,
    // then the blocks have been reordered since dom graph building...which
    // may question the RPO numbering
    assert(b->_rpo == i, "unexpected reverse post order number");
  }
#endif

  int idct = 0;
  CFGLoop* root_loop = new CFGLoop(idct++);

  Block_List worklist;

  // Assign blocks to loops
  for(uint i = _num_blocks - 1; i > 0; i-- ) { // skip Root block
    Block *b = _blocks[i];

    if (b->head()->is_Loop()) {
      Block* loop_head = b;
      assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
      Block* tail = _bbs[tail_n->_idx];

      // Defensively filter out Loop nodes for non-single-entry loops.
      // For all reasonable loops, the head occurs before the tail in RPO.
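      // A head that does not precede its tail in RPO suggests a loop
      // entered on more than one path; such heads are skipped here, and
      // their blocks fall to an enclosing loop (ultimately the method's
      // pseudo loop).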
      if (i <= tail->_rpo) {

        // The tail and (recursive) predecessors of the tail
        // are made members of a new loop.

        assert(worklist.size() == 0, "nonempty worklist");
        CFGLoop* nloop = new CFGLoop(idct++);
        assert(loop_head->_loop == NULL, "just checking");
        loop_head->_loop = nloop;
        // Add to nloop so push_pred() will skip over inner loops
        nloop->add_member(loop_head);
        nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, _bbs);

        while (worklist.size() > 0) {
          Block* member = worklist.pop();
          if (member != loop_head) {
            for (uint j = 1; j < member->num_preds(); j++) {
              nloop->push_pred(member, j, worklist, _bbs);
            }
          }
        }
      }
    }
  }

  // Create a member list for each loop consisting
  // of both blocks and (immediate child) loops.
  for (uint i = 0; i < _num_blocks; i++) {
    Block *b = _blocks[i];
    CFGLoop* lp = b->_loop;
    if (lp == NULL) {
      // Not assigned to a loop.  Add it to the method's pseudo loop.
      b->_loop = root_loop;
      lp = root_loop;
    }
    if (lp == root_loop || b != lp->head()) { // loop heads are already members
      lp->add_member(b);
    }
    if (lp != root_loop) {
      if (lp->parent() == NULL) {
        // Not a nested loop.  Make it a child of the method's pseudo loop.
        root_loop->add_nested_loop(lp);
      }
      if (b == lp->head()) {
        // Add nested loop to member list of parent loop.
        lp->parent()->add_member(lp);
      }
    }
  }

  return root_loop;
}

//------------------------------push_pred--------------------------------------
void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk) {
  Node* pred_n = blk->pred(i);
  Block* pred = node_to_blk[pred_n->_idx];
  CFGLoop *pred_loop = pred->_loop;
  if (pred_loop == NULL) {
    // Filter out blocks for non-single-entry loops.
    // For all reasonable loops, the head occurs before the tail in RPO.
    if (pred->_rpo > head()->_rpo) {
      pred->_loop = this;
      worklist.push(pred);
    }
  } else if (pred_loop != this) {
    // Nested loop.
    while (pred_loop->_parent != NULL && pred_loop->_parent != this) {
      pred_loop = pred_loop->_parent;
    }
    // Make pred's loop be a child
    if (pred_loop->_parent == NULL) {
      add_nested_loop(pred_loop);
      // Continue with loop entry predecessor.
      Block* pred_head = pred_loop->head();
      assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      assert(pred_head != head(), "loop head in only one loop");
      push_pred(pred_head, LoopNode::EntryControl, worklist, node_to_blk);
    } else {
      assert(pred_loop->_parent == this && _parent == NULL, "just checking");
    }
  }
}

//------------------------------add_nested_loop--------------------------------
// Make cl a child of the current loop in the loop tree.
void CFGLoop::add_nested_loop(CFGLoop* cl) {
  assert(_parent == NULL, "no parent yet");
  assert(cl != this, "not my own parent");
  cl->_parent = this;
  CFGLoop* ch = _child;
  if (ch == NULL) {
    _child = cl;
  } else {
    while (ch->_sibling != NULL) { ch = ch->_sibling; }
    ch->_sibling = cl;
  }
}

//------------------------------compute_loop_depth-----------------------------
// Store the loop depth in each CFGLoop object.
// Recursively walk the children to do the same for them.
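// The method-level pseudo loop is given depth 0 by the caller, so, for
// example, a block inside a doubly nested loop ends up at depth 2.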
void CFGLoop::compute_loop_depth(int depth) {
  _depth = depth;
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->compute_loop_depth(depth + 1);
    ch = ch->_sibling;
  }
}

//------------------------------compute_freq-----------------------------------
// Compute the frequency of each block and loop, relative to a single entry
// into the dominating loop head.
void CFGLoop::compute_freq() {
  // Bottom-up traversal of the loop tree (visit inner loops first).
  // Set loop head frequency to 1.0, then transitively
  // compute frequency for all successors in the loop,
  // as well as for each exit edge. Inner loops are
  // treated as single blocks with loop exit targets
  // as the successor blocks.

  // Nested loops first
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->compute_freq();
    ch = ch->_sibling;
  }
  assert(_members.length() > 0, "no empty loops");
  Block* hd = head();
  hd->_freq = 1.0f;
  for (int i = 0; i < _members.length(); i++) {
    CFGElement* s = _members.at(i);
    float freq = s->_freq;
    if (s->is_block()) {
      Block* b = s->as_Block();
      for (uint j = 0; j < b->_num_succs; j++) {
        Block* sb = b->_succs[j];
        update_succ_freq(sb, freq * b->succ_prob(j));
      }
    } else {
      CFGLoop* lp = s->as_CFGLoop();
      assert(lp->_parent == this, "immediate child");
      for (int k = 0; k < lp->_exits.length(); k++) {
        Block* eb = lp->_exits.at(k).get_target();
        float prob = lp->_exits.at(k).get_prob();
        update_succ_freq(eb, freq * prob);
      }
    }
  }

#if 0
  // Raise frequency of the loop backedge block, in an effort
  // to keep it empty. Skip the method level "loop".
  if (_parent != NULL) {
    CFGElement* s = _members.at(_members.length() - 1);
    if (s->is_block()) {
      Block* bk = s->as_Block();
      if (bk->_num_succs == 1 && bk->_succs[0] == hd) {
        // almost any value >= 1.0f works
        // FIXME: raw constant
        bk->_freq = 1.05f;
      }
    }
  }
#endif

  // For all loops other than the outer, "method" loop,
  // sum and normalize the exit probability. The "method" loop
  // should keep the initial exit probability of 1, so that
  // inner blocks do not get erroneously scaled.
  if (_depth != 0) {
    // Total the exit probabilities for this loop.
    float exits_sum = 0.0f;
    for (int i = 0; i < _exits.length(); i++) {
      exits_sum += _exits.at(i).get_prob();
    }

    // Normalize the exit probabilities. Until now, the
    // probabilities estimate the possibility of exit per
    // single loop iteration; afterward, they estimate
    // the probability of exit per loop entry.
    for (int i = 0; i < _exits.length(); i++) {
      Block* et = _exits.at(i).get_target();
      float new_prob = _exits.at(i).get_prob() / exits_sum;
      BlockProbPair bpp(et, new_prob);
      _exits.at_put(i, bpp);
    }

    // Save the total, but guard against an unreasonable probability,
    // as the value is used to estimate the loop trip count.
    // An infinite trip count would blur relative block
    // frequencies.
    if (exits_sum > 1.0f) exits_sum = 1.0f;
    if (exits_sum < PROB_MIN) exits_sum = PROB_MIN;
    _exit_prob = exits_sum;
  }
}

//------------------------------succ_prob-------------------------------------
// Determine the probability of reaching successor 'i' from the receiver block.
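// Successor projections are laid out in _nodes immediately after the
// block-ending node, so successor 'i' corresponds to _nodes[end_idx() + 1 + i].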
float Block::succ_prob(uint i) {
  int eidx = end_idx();
  Node *n = _nodes[eidx];       // Get ending Node
  int op = n->is_Mach() ? n->as_Mach()->ideal_Opcode() : n->Opcode();

  // Switch on branch type
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If: {
    assert(i < 2, "just checking");
    // Conditionals pass on only part of their frequency
    float prob = n->as_MachIf()->_prob;
    assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
    // If succ[i] is the FALSE branch, invert path info
    if( _nodes[i + eidx + 1]->Opcode() == Op_IfFalse ) {
      return 1.0f - prob; // not taken
    } else {
      return prob; // taken
    }
  }

  case Op_Jump:
    // Divide the frequency between all successors evenly
    return 1.0f/_num_succs;

  case Op_Catch: {
    const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
    if (ci->_con == CatchProjNode::fall_through_index) {
      // Fall-thru path gets the lion's share.
      return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
    } else {
      // Presume exceptional paths are equally unlikely
      return PROB_UNLIKELY_MAG(5);
    }
  }

  case Op_Root:
  case Op_Goto:
    // Pass frequency straight thru to target
    return 1.0f;

  case Op_NeverBranch:
    return 0.0f;

  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    // Do not push out freq to root block
    return 0.0f;

  default:
    ShouldNotReachHere();
  }

  return 0.0f;
}

//------------------------------update_succ_freq-------------------------------
// Update the appropriate frequency associated with block 'b', a successor of
// a block in this loop.
void CFGLoop::update_succ_freq(Block* b, float freq) {
  if (b->_loop == this) {
    if (b == head()) {
      // Back branch within the loop.
      // Do nothing now; the loop-carried frequency will be
      // adjusted later in scale_freq().
    } else {
      // Simple branch within the loop
      b->_freq += freq;
    }
  } else if (!in_loop_nest(b)) {
    // Branch is an exit from this loop
    BlockProbPair bpp(b, freq);
    _exits.append(bpp);
  } else {
    // Branch into a nested loop
    CFGLoop* ch = b->_loop;
    ch->_freq += freq;
  }
}

//------------------------------in_loop_nest-----------------------------------
// Determine if block b is in the receiver's loop nest.
bool CFGLoop::in_loop_nest(Block* b) {
  int depth = _depth;
  CFGLoop* b_loop = b->_loop;
  int b_depth = b_loop->_depth;
  if (depth == b_depth) {
    return true;
  }
  while (b_depth > depth) {
    b_loop = b_loop->_parent;
    b_depth = b_loop->_depth;
  }
  return b_loop == this;
}

//------------------------------scale_freq-------------------------------------
// Scale the frequency of loops and blocks by trip counts from outer loops.
// Do a top-down traversal of the loop tree (visit outer loops first).
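// On entry, each member's _freq is relative to a single trip through this
// loop; multiplying by _freq * trip_count() rescales it to be relative to a
// single method entry. As a hedged example (assuming trip_count() is derived
// from the _exit_prob saved in compute_freq()): a loop entered with frequency
// 0.5 whose summed per-iteration exit probability is 0.1 has an estimated
// trip count of about 10, so its members are scaled by about 5.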
void CFGLoop::scale_freq() {
  float loop_freq = _freq * trip_count();
  for (int i = 0; i < _members.length(); i++) {
    CFGElement* s = _members.at(i);
    s->_freq *= loop_freq;
  }
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->scale_freq();
    ch = ch->_sibling;
  }
}

#ifndef PRODUCT
//------------------------------dump_tree--------------------------------------
void CFGLoop::dump_tree() const {
  dump();
  if (_child != NULL)   _child->dump_tree();
  if (_sibling != NULL) _sibling->dump_tree();
}

//------------------------------dump-------------------------------------------
void CFGLoop::dump() const {
  for (int i = 0; i < _depth; i++) tty->print(" ");
  tty->print("%s: %d trip_count: %6.0f freq: %6.0f\n",
             _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq);
  for (int i = 0; i < _depth; i++) tty->print(" ");
  tty->print(" members:");
  int k = 0;
  for (int i = 0; i < _members.length(); i++) {
    if (k++ >= 6) {
      tty->print("\n ");
      for (int j = 0; j < _depth+1; j++) tty->print(" ");
      k = 0;
    }
    CFGElement *s = _members.at(i);
    if (s->is_block()) {
      Block *b = s->as_Block();
      tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq);
    } else {
      CFGLoop* lp = s->as_CFGLoop();
      tty->print(" L%d(%6.3f)", lp->_id, lp->_freq);
    }
  }
  tty->print("\n");
  for (int i = 0; i < _depth; i++) tty->print(" ");
  tty->print(" exits: ");
  k = 0;
  for (int i = 0; i < _exits.length(); i++) {
    if (k++ >= 7) {
      tty->print("\n ");
      for (int j = 0; j < _depth+1; j++) tty->print(" ");
      k = 0;
    }
    Block *blk = _exits.at(i).get_target();
    float prob = _exits.at(i).get_prob();
    tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
  }
  tty->print("\n");
}
#endif