/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/machnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/deoptimization.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

// To avoid float value underflow
#define MIN_BLOCK_FREQUENCY 1.e-35f

//----------------------------schedule_node_into_block-------------------------
// Insert node n into block b. Look for projections of n and make sure they
// are in b also.
void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
  // Set basic block of n, Add n to b,
  map_node_to_block(n, b);
  b->add_inst(n);

  // After Matching, nearly any old Node may have projections trailing it.
  // These are usually machine-dependent flags. In any case, they might
  // float to another block below this one. Move them up.
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* use = n->fast_out(i);
    if (use->is_Proj()) {
      Block* buse = get_block_for_node(use);
      if (buse != b) {              // In wrong block?
        if (buse != NULL) {
          buse->find_remove(use);   // Remove from wrong block
        }
        map_node_to_block(use, b);
        b->add_inst(use);
      }
    }
  }
}

//----------------------------replace_block_proj_ctrl-------------------------
// Nodes that have is_block_proj() nodes as their control need to use
// the appropriate Region for their actual block as their control since
// the projection will be in a predecessor block.
void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
  const Node *in0 = n->in(0);
  assert(in0 != NULL, "Only control-dependent");
  const Node *p = in0->is_block_proj();
  if (p != NULL && p != n) {    // Control from a block projection?
    assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
    // Find trailing Region
    Block *pb = get_block_for_node(in0); // Block-projection already has basic block
    uint j = 0;
    if (pb->_num_succs != 1) {  // More than 1 successor?
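      // The block projections occupy the last _num_succs slots of pb's
      // node list, in the same order as pb->_succs (see the index
      // arithmetic below).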
      // Search for successor
      uint max = pb->number_of_nodes();
      assert( max > 1, "" );
      uint start = max - pb->_num_succs;
      // Find which output path belongs to projection
      for (j = start; j < max; j++) {
        if( pb->get_node(j) == in0 )
          break;
      }
      assert( j < max, "must find" );
      // Change control to match head of successor basic block
      j -= start;
    }
    n->set_req(0, pb->_succs[j]->head());
  }
}

static bool is_dominator(Block* d, Block* n) {
  return d->dom_lca(n) == d;
}

//------------------------------schedule_pinned_nodes--------------------------
// Set the basic block for Nodes pinned into blocks
void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
  // Allocate node stack of size C->live_nodes()+8 to avoid frequent realloc
  GrowableArray <Node *> spstack(C->live_nodes() + 8);
  spstack.push(_root);
  while (spstack.is_nonempty()) {
    Node* node = spstack.pop();
    if (!visited.test_set(node->_idx)) { // Test node and flag it as visited
      if (node->pinned() && !has_block(node)) { // Pinned? Nail it down!
        assert(node->in(0), "pinned Node must have Control");
        // Before setting block replace block_proj control edge
        replace_block_proj_ctrl(node);
        Node* input = node->in(0);
        while (!input->is_block_start()) {
          input = input->in(0);
        }
        Block* block = get_block_for_node(input); // Basic block of controlling input
        schedule_node_into_block(node, block);
      }

      // If the node has precedence edges (added when CastPP nodes are
      // removed in final_graph_reshaping), fix the control of the
      // node to cover the precedence edges and remove the
      // dependencies.
      Node* n = NULL;
      for (uint i = node->len()-1; i >= node->req(); i--) {
        Node* m = node->in(i);
        if (m == NULL) continue;
        // Skip the precedence edge if the test that guarded a CastPP:
        // - was optimized out during escape analysis
        // (OptimizePtrCompare): the CastPP's control isn't an end of
        // block.
        // - is moved in the branch of a dominating If: the control of
        // the CastPP is then a Region.
        if (m->is_block_proj() || m->is_block_start()) {
          node->rm_prec(i);
          if (n == NULL) {
            n = m;
          } else {
            Block* bn = get_block_for_node(n);
            Block* bm = get_block_for_node(m);
            assert(is_dominator(bn, bm) || is_dominator(bm, bn), "one must dominate the other");
            n = is_dominator(bn, bm) ? m : n;
          }
        }
      }
      if (n != NULL) {
        assert(node->in(0), "control should have been set");
        Block* bn = get_block_for_node(n);
        Block* bnode = get_block_for_node(node->in(0));
        assert(is_dominator(bn, bnode) || is_dominator(bnode, bn), "one must dominate the other");
        if (!is_dominator(bn, bnode)) {
          node->set_req(0, n);
        }
      }

      // process all inputs that are non NULL
      for (int i = node->req() - 1; i >= 0; --i) {
        if (node->in(i) != NULL) {
          spstack.push(node->in(i));
        }
      }
    }
  }
}

#ifdef ASSERT
// Assert that new input b2 is dominated by all previous inputs.
// Check this by seeing that it is dominated by b1, the deepest
// input observed until b2.
static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
  if (b1 == NULL)  return;
  assert(b1->_dom_depth < b2->_dom_depth, "sanity");
  Block* tmp = b2;
  while (tmp != b1 && tmp != NULL) {
    tmp = tmp->_idom;
  }
  if (tmp != b1) {
    // Detected an unschedulable graph. Print some nice stuff and die.
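    // Dump each input of n together with its block's idom and dom depth,
    // so the broken dominance chain is visible in the output.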
    tty->print_cr("!!! Unschedulable graph !!!");
    for (uint j=0; j<n->len(); j++) { // For all inputs
      Node* inn = n->in(j); // Get input
      if (inn == NULL)  continue;  // Ignore NULL, missing inputs
      Block* inb = cfg->get_block_for_node(inn);
      tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
                 inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
      inn->dump();
    }
    tty->print("Failing node: ");
    n->dump();
    assert(false, "unschedulable graph");
  }
}
#endif

static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
  // Find the last input dominated by all other inputs.
  Block* deepb = NULL;        // Deepest block so far
  int deepb_dom_depth = 0;
  for (uint k = 0; k < n->len(); k++) { // For all inputs
    Node* inn = n->in(k);               // Get input
    if (inn == NULL)  continue;         // Ignore NULL, missing inputs
    Block* inb = cfg->get_block_for_node(inn);
    assert(inb != NULL, "must already have scheduled this input");
    if (deepb_dom_depth < (int) inb->_dom_depth) {
      // The new inb must be dominated by the previous deepb.
      // The various inputs must be linearly ordered in the dom
      // tree, or else there will not be a unique deepest block.
      DEBUG_ONLY(assert_dom(deepb, inb, n, cfg));
      deepb = inb;                      // Save deepest block
      deepb_dom_depth = deepb->_dom_depth;
    }
  }
  assert(deepb != NULL, "must be at least one input to n");
  return deepb;
}


//------------------------------schedule_early---------------------------------
// Find the earliest Block any instruction can be placed in. Some instructions
// are pinned into Blocks. Unpinned instructions can appear in last block in
// which all their inputs occur.
bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) {
  // Allocate stack with enough space to avoid frequent realloc
  Node_Stack nstack(roots.Size() + 8);
  // _root will be processed among C->top() inputs
  roots.push(C->top());
  visited.set(C->top()->_idx);

  while (roots.size() != 0) {
    // Use local variables nstack_top_n & nstack_top_i to cache values
    // on stack's top.
    Node* parent_node = roots.pop();
    uint  input_index = 0;

    while (true) {
      if (input_index == 0) {
        // Fixup some control. Constants without control get attached
        // to root and nodes that use is_block_proj() nodes should be attached
        // to the region that starts their block.
        const Node* control_input = parent_node->in(0);
        if (control_input != NULL) {
          replace_block_proj_ctrl(parent_node);
        } else {
          // Is a constant with NO inputs?
          if (parent_node->req() == 1) {
            parent_node->set_req(0, _root);
          }
        }
      }

      // First, visit all inputs and force them to get a block. If an
      // input is already in a block we quit following inputs (to avoid
      // cycles). Instead we put that Node on a worklist to be handled
      // later (since ITS inputs may not have a block yet).

      // Assume all n's inputs will be processed
      bool done = true;

      while (input_index < parent_node->len()) {
        Node* in = parent_node->in(input_index++);
        if (in == NULL) {
          continue;
        }

        int is_visited = visited.test_set(in->_idx);
        if (!has_block(in)) {
          if (is_visited) {
            return false;
          }
          // Save parent node and next input's index.
          nstack.push(parent_node, input_index);
          // Process current input now.
          parent_node = in;
          input_index = 0;
          // Not all n's inputs processed.
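          // (They will be resumed from the nstack entry saved above once
          // this input's subtree has been scheduled.)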
          done = false;
          break;
        } else if (!is_visited) {
          // Visit this guy later, using worklist
          roots.push(in);
        }
      }

      if (done) {
        // All of n's inputs have been processed, complete post-processing.

        // Some instructions are pinned into a block. These include Region,
        // Phi, Start, Return, and other control-dependent instructions and
        // any projections which depend on them.
        if (!parent_node->pinned()) {
          // Set earliest legal block.
          Block* earliest_block = find_deepest_input(parent_node, this);
          map_node_to_block(parent_node, earliest_block);
        } else {
          assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge");
        }

        if (nstack.is_empty()) {
          // Finished all nodes on stack.
          // Process next node on the worklist 'roots'.
          break;
        }
        // Get saved parent node and next input's index.
        parent_node = nstack.node();
        input_index = nstack.index();
        nstack.pop();
      }
    }
  }
  return true;
}

//------------------------------dom_lca----------------------------------------
// Find least common ancestor in dominator tree
// LCA is a current notion of LCA, to be raised above 'this'.
// As a convenient boundary condition, return 'this' if LCA is NULL.
// Find the LCA of those two nodes.
Block* Block::dom_lca(Block* LCA) {
  if (LCA == NULL || LCA == this)  return this;

  Block* anc = this;
  while (anc->_dom_depth > LCA->_dom_depth)
    anc = anc->_idom;           // Walk up till anc is as high as LCA

  while (LCA->_dom_depth > anc->_dom_depth)
    LCA = LCA->_idom;           // Walk up till LCA is as high as anc

  while (LCA != anc) {          // Walk both up till they are the same
    LCA = LCA->_idom;
    anc = anc->_idom;
  }

  return LCA;
}

//--------------------------raise_LCA_above_use--------------------------------
// We are placing a definition, and have been given a def->use edge.
// The definition must dominate the use, so move the LCA upward in the
// dominator tree to dominate the use. If the use is a phi, adjust
// the LCA only with the phi input paths which actually use this def.
static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
  Block* buse = cfg->get_block_for_node(use);
  if (buse == NULL) return LCA;   // Unused killing Projs have no use block
  if (!use->is_Phi())  return buse->dom_lca(LCA);
  uint pmax = use->req();       // Number of Phi inputs
  // Why doesn't this loop just break after finding the matching input to
  // the Phi? Well...it's like this. I do not have true def-use/use-def
  // chains. Means I cannot distinguish, from the def-use direction, which
  // of many use-defs lead from the same use to the same def. That is, this
  // Phi might have several uses of the same def. Each use appears in a
  // different predecessor block. But when I enter here, I cannot distinguish
  // which use-def edge I should find the predecessor block for. So I find
  // them all. Means I do a little extra work if a Phi uses the same value
  // more than once.
  for (uint j=1; j<pmax; j++) { // For all inputs
    if (use->in(j) == def) {    // Found matching input?
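      // The def reaches the Phi along predecessor j, so it need only
      // dominate that predecessor block, not the Phi's own block.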
      Block* pred = cfg->get_block_for_node(buse->pred(j));
      LCA = pred->dom_lca(LCA);
    }
  }
  return LCA;
}

//----------------------------raise_LCA_above_marks----------------------------
// Return a new LCA that dominates LCA and any of its marked predecessors.
// Search all my parents up to 'early' (exclusive), looking for predecessors
// which are marked with the given index. Return the LCA (in the dom tree)
// of all marked blocks. If there are none marked, return the original
// LCA.
static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
  Block_List worklist;
  worklist.push(LCA);
  while (worklist.size() > 0) {
    Block* mid = worklist.pop();
    if (mid == early)  continue;  // stop searching here

    // Test and set the visited bit.
    if (mid->raise_LCA_visited() == mark)  continue;  // already visited

    // Don't process the current LCA, otherwise the search may terminate early
    if (mid != LCA && mid->raise_LCA_mark() == mark) {
      // Raise the LCA.
      LCA = mid->dom_lca(LCA);
      if (LCA == early)  break;   // stop searching everywhere
      assert(early->dominates(LCA), "early is high enough");
      // Resume searching at that point, skipping intermediate levels.
      worklist.push(LCA);
      if (LCA == mid)
        continue; // Don't mark as visited to avoid early termination.
    } else {
      // Keep searching through this block's predecessors.
      for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
        Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
        worklist.push(mid_parent);
      }
    }
    mid->set_raise_LCA_visited(mark);
  }
  return LCA;
}

//--------------------------memory_early_block--------------------------------
// This is a variation of find_deepest_input, the heart of schedule_early.
// Find the "early" block for a load, if we considered only memory and
// address inputs, that is, if other data inputs were ignored.
//
// Because a subset of edges are considered, the resulting block will
// be earlier (at a shallower dom_depth) than the true schedule_early
// point of the node. We compute this earlier block as a more permissive
// site for anti-dependency insertion, but only if subsume_loads is enabled.
static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
  Node* base;
  Node* index;
  Node* store = load->in(MemNode::Memory);
  load->as_Mach()->memory_inputs(base, index);

  assert(base != NodeSentinel && index != NodeSentinel,
         "unexpected base/index inputs");

  Node* mem_inputs[4];
  int mem_inputs_length = 0;
  if (base != NULL)  mem_inputs[mem_inputs_length++] = base;
  if (index != NULL) mem_inputs[mem_inputs_length++] = index;
  if (store != NULL) mem_inputs[mem_inputs_length++] = store;

  // In the comparison below, add one to account for the control input,
  // which may be null, but always takes up a spot in the in array.
  if (mem_inputs_length + 1 < (int) load->req()) {
    // This "load" has more inputs than just the memory, base and index inputs.
    // For purposes of checking anti-dependences, we need to start
    // from the early block of only the address portion of the instruction,
    // and ignore other blocks that may have factored into the wider
    // schedule_early calculation.
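    // The control input, when present, also bounds how early the load
    // may be placed, so include it in the restricted input set.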
    if (load->in(0) != NULL) mem_inputs[mem_inputs_length++] = load->in(0);

    Block* deepb = NULL;        // Deepest block so far
    int deepb_dom_depth = 0;
    for (int i = 0; i < mem_inputs_length; i++) {
      Block* inb = cfg->get_block_for_node(mem_inputs[i]);
      if (deepb_dom_depth < (int) inb->_dom_depth) {
        // The new inb must be dominated by the previous deepb.
        // The various inputs must be linearly ordered in the dom
        // tree, or else there will not be a unique deepest block.
        DEBUG_ONLY(assert_dom(deepb, inb, load, cfg));
        deepb = inb;                      // Save deepest block
        deepb_dom_depth = deepb->_dom_depth;
      }
    }
    early = deepb;
  }

  return early;
}

//--------------------------insert_anti_dependences---------------------------
// A load may need to witness memory that nearby stores can overwrite.
// For each nearby store, either insert an "anti-dependence" edge
// from the load to the store, or else move LCA upward to force the
// load to (eventually) be scheduled in a block above the store.
//
// Do not add edges to stores on distinct control-flow paths;
// only add edges to stores which might interfere.
//
// Return the (updated) LCA. There will not be any possibly interfering
// store between the load's "early block" and the updated LCA.
// Any stores in the updated LCA will have new precedence edges
// back to the load. The caller is expected to schedule the load
// in the LCA, in which case the precedence edges will make LCM
// preserve anti-dependences. The caller may also hoist the load
// above the LCA, if it is not the early block.
Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
  assert(load->needs_anti_dependence_check(), "must be a load of some sort");
  assert(LCA != NULL, "");
  DEBUG_ONLY(Block* LCA_orig = LCA);

  // Compute the alias index. Loads and stores with different alias indices
  // do not need anti-dependence edges.
  int load_alias_idx = C->get_alias_index(load->adr_type());
#ifdef ASSERT
  if (load_alias_idx == Compile::AliasIdxBot && C->AliasLevel() > 0 &&
      (PrintOpto || VerifyAliases ||
       PrintMiscellaneous && (WizardMode || Verbose))) {
    // Load nodes should not consume all of memory.
    // Reporting a bottom type indicates a bug in adlc.
    // If some particular type of node validly consumes all of memory,
    // sharpen the preceding "if" to exclude it, so we can catch bugs here.
    tty->print_cr("*** Possible Anti-Dependence Bug: Load consumes all of memory.");
    load->dump(2);
    if (VerifyAliases)  assert(load_alias_idx != Compile::AliasIdxBot, "");
  }
#endif
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrComp),
         "String compare is only known 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrEquals),
         "String equals is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrIndexOf),
         "String indexOf is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_AryEq),
         "Arrays equals is a 'load' that does not conflict with any stores");

  if (!C->alias_type(load_alias_idx)->is_rewritable()) {
    // It is impossible to spoil this load by putting stores before it,
    // because we know that the stores will never update the value
    // which 'load' must witness.
    return LCA;
  }

  node_idx_t load_index = load->_idx;

  // Note the earliest legal placement of 'load', as determined
  // by the unique point in the dom tree where all memory effects
  // and other inputs are first available. (Computed by schedule_early.)
  // For normal loads, 'early' is the shallowest place (dom graph wise)
  // to look for anti-deps between this load and any store.
  Block* early = get_block_for_node(load);

  // If we are subsuming loads, compute an "early" block that only considers
  // memory or address inputs. This block may be different than the
  // schedule_early block in that it could be at an even shallower depth in the
  // dominator tree, and allow for a broader discovery of anti-dependences.
  if (C->subsume_loads()) {
    early = memory_early_block(load, early, this);
  }

  ResourceArea *area = Thread::current()->resource_area();
  Node_List worklist_mem(area);     // prior memory state to store
  Node_List worklist_store(area);   // possible-def to explore
  Node_List worklist_visited(area); // visited mergemem nodes
  Node_List non_early_stores(area); // all relevant stores outside of early
  bool must_raise_LCA = false;

#ifdef TRACK_PHI_INPUTS
  // %%% This extra checking fails because MergeMem nodes are not GVNed.
  // Provide "phi_inputs" to check if every input to a PhiNode is from the
  // original memory state. This indicates a PhiNode which should not
  // prevent the load from sinking. For such a block, set_raise_LCA_mark
  // may be overly conservative.
  // Mechanism: count inputs seen for each Phi encountered in worklist_store.
  DEBUG_ONLY(GrowableArray<uint> phi_inputs(area, C->unique(),0,0));
#endif

  // 'load' uses some memory state; look for users of the same state.
  // Recurse through MergeMem nodes to the stores that use them.

  // Each of these stores is a possible definition of memory
  // that 'load' needs to use. We need to force 'load'
  // to occur before each such store. When the store is in
  // the same block as 'load', we insert an anti-dependence
  // edge load->store.

  // The relevant stores "nearby" the load consist of a tree rooted
  // at initial_mem, with internal nodes of type MergeMem.
  // Therefore, the branches visited by the worklist are of this form:
  //    initial_mem -> (MergeMem ->)* store
  // The anti-dependence constraints apply only to the fringe of this tree.

  Node* initial_mem = load->in(MemNode::Memory);
  worklist_store.push(initial_mem);
  worklist_visited.push(initial_mem);
  worklist_mem.push(NULL);
  while (worklist_store.size() > 0) {
    // Examine a nearby store to see if it might interfere with our load.
    Node* mem   = worklist_mem.pop();
    Node* store = worklist_store.pop();
    uint op = store->Opcode();

    // MergeMems do not directly have anti-deps.
    // Treat them as internal nodes in a forward tree of memory states,
    // the leaves of which are each a 'possible-def'.
    if (store == initial_mem    // root (exclusive) of tree we are searching
        || op == Op_MergeMem    // internal node of tree we are searching
        ) {
      mem = store;   // It's not a possibly interfering store.
      if (store == initial_mem)
        initial_mem = NULL;  // only process initial memory once

      for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
        store = mem->fast_out(i);
        if (store->is_MergeMem()) {
          // Be sure we don't get into combinatorial problems.
          // (Allow phis to be repeated; they can merge two relevant states.)
          uint j = worklist_visited.size();
          for (; j > 0; j--) {
            if (worklist_visited.at(j-1) == store)  break;
          }
          if (j > 0)  continue; // already on work list; do not repeat
          worklist_visited.push(store);
        }
        worklist_mem.push(mem);
        worklist_store.push(store);
      }
      continue;
    }

    if (op == Op_MachProj || op == Op_Catch)   continue;
    if (store->needs_anti_dependence_check())  continue;  // not really a store

    // Compute the alias index. Loads and stores with different alias
    // indices do not need anti-dependence edges. Wide MemBar's are
    // anti-dependent on everything (except immutable memories).
    const TypePtr* adr_type = store->adr_type();
    if (!C->can_alias(adr_type, load_alias_idx))  continue;

    // Most slow-path runtime calls do NOT modify Java memory, but
    // they can block and so write Raw memory.
    if (store->is_Mach()) {
      MachNode* mstore = store->as_Mach();
      if (load_alias_idx != Compile::AliasIdxRaw) {
        // Check for call into the runtime using the Java calling
        // convention (and from there into a wrapper); it has no
        // _method. Can't do this optimization for Native calls because
        // they CAN write to Java memory.
        if (mstore->ideal_Opcode() == Op_CallStaticJava) {
          assert(mstore->is_MachSafePoint(), "");
          MachSafePointNode* ms = (MachSafePointNode*) mstore;
          assert(ms->is_MachCallJava(), "");
          MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
          if (mcj->_method == NULL) {
            // These runtime calls do not write to Java visible memory
            // (other than Raw) and so do not require anti-dependence edges.
            continue;
          }
        }
        // Same for SafePoints: they read/write Raw but only read otherwise.
        // This is basically a workaround for SafePoints only defining control
        // instead of control + memory.
        if (mstore->ideal_Opcode() == Op_SafePoint)
          continue;
      } else {
        // Some raw memory, such as the load of "top" at an allocation,
        // can be control dependent on the previous safepoint. See
        // comments in GraphKit::allocate_heap() about control input.
        // Inserting an anti-dep between such a safepoint and a use
        // creates a cycle, and will cause a subsequent failure in
        // local scheduling. (BugId 4919904)
        // (%%% How can a control input be a safepoint and not a projection??)
        if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore)
          continue;
      }
    }

    // Identify a block that the current load must be above,
    // or else observe that 'store' is all the way up in the
    // earliest legal block for 'load'. In the latter case,
    // immediately insert an anti-dependence edge.
    Block* store_block = get_block_for_node(store);
    assert(store_block != NULL, "unused killing projections skipped above");

    if (store->is_Phi()) {
      // 'load' uses memory which is one (or more) of the Phi's inputs.
      // It must be scheduled not before the Phi, but rather before
      // each of the relevant Phi inputs.
      //
      // Instead of finding the LCA of all inputs to a Phi that match 'mem',
      // we mark each corresponding predecessor block and do a combined
      // hoisting operation later (raise_LCA_above_marks).
      //
      // Do not assert(store_block != early, "Phi merging memory after access")
      // PhiNode may be at start of block 'early' with backedge to 'early'
      DEBUG_ONLY(bool found_match = false);
      for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
        if (store->in(j) == mem) {   // Found matching input?
          DEBUG_ONLY(found_match = true);
          Block* pred_block = get_block_for_node(store_block->pred(j));
          if (pred_block != early) {
            // If any predecessor of the Phi matches the load's "early block",
            // we do not need a precedence edge between the Phi and 'load'
            // since the load will be forced into a block preceding the Phi.
            pred_block->set_raise_LCA_mark(load_index);
            assert(!LCA_orig->dominates(pred_block) ||
                   early->dominates(pred_block), "early is high enough");
            must_raise_LCA = true;
          } else {
            // anti-dependent upon PHI pinned below 'early', no edge needed
            LCA = early;             // but can not schedule below 'early'
          }
        }
      }
      assert(found_match, "no worklist bug");
#ifdef TRACK_PHI_INPUTS
#ifdef ASSERT
      // This assert asks about correct handling of PhiNodes, which may not
      // have all input edges directly from 'mem'. See BugId 4621264
      int num_mem_inputs = phi_inputs.at_grow(store->_idx,0) + 1;
      // Increment by exactly one even if there are multiple copies of 'mem'
      // coming into the phi, because we will run this block several times
      // if there are several copies of 'mem'. (That's how DU iterators work.)
      phi_inputs.at_put(store->_idx, num_mem_inputs);
      assert(PhiNode::Input + num_mem_inputs < store->req(),
             "Expect at least one phi input will not be from original memory state");
#endif //ASSERT
#endif //TRACK_PHI_INPUTS
    } else if (store_block != early) {
      // 'store' is between the current LCA and earliest possible block.
      // Label its block, and decide later on how to raise the LCA
      // to include the effect on LCA of this store.
      // If this store's block gets chosen as the raised LCA, we
      // will find him on the non_early_stores list and stick him
      // with a precedence edge.
      // (But, don't bother if LCA is already raised all the way.)
      if (LCA != early) {
        store_block->set_raise_LCA_mark(load_index);
        must_raise_LCA = true;
        non_early_stores.push(store);
      }
    } else {
      // Found a possibly-interfering store in the load's 'early' block.
      // This means 'load' cannot sink at all in the dominator tree.
      // Add an anti-dep edge, and squeeze 'load' into the highest block.
      assert(store != load->in(0), "dependence cycle found");
      if (verify) {
        assert(store->find_edge(load) != -1, "missing precedence edge");
      } else {
        store->add_prec(load);
      }
      LCA = early;
      // This turns off the process of gathering non_early_stores.
    }
  }
  // (Worklist is now empty; all nearby stores have been visited.)

  // Finished if 'load' must be scheduled in its 'early' block.
  // If we found any stores there, they have already been given
  // precedence edges.
  if (LCA == early)  return LCA;

  // We get here only if there are no possibly-interfering stores
  // in the load's 'early' block. Move LCA up above all predecessors
  // which contain stores we have noted.
  //
  // The raised LCA block can be a home to such interfering stores,
  // but its predecessors must not contain any such stores.
  //
  // The raised LCA will be a lower bound for placing the load,
  // preventing the load from sinking past any block containing
  // a store that may invalidate the memory state required by 'load'.
  if (must_raise_LCA)
    LCA = raise_LCA_above_marks(LCA, load->_idx, early, this);
  if (LCA == early)  return LCA;

  // Insert anti-dependence edges from 'load' to each store
  // in the non-early LCA block.
  // Mine the non_early_stores list for such stores.
  if (LCA->raise_LCA_mark() == load_index) {
    while (non_early_stores.size() > 0) {
      Node* store = non_early_stores.pop();
      Block* store_block = get_block_for_node(store);
      if (store_block == LCA) {
        // add anti_dependence from store to load in its own block
        assert(store != load->in(0), "dependence cycle found");
        if (verify) {
          assert(store->find_edge(load) != -1, "missing precedence edge");
        } else {
          store->add_prec(load);
        }
      } else {
        assert(store_block->raise_LCA_mark() == load_index, "block was marked");
        // Any other stores we found must be either inside the new LCA
        // or else outside the original LCA. In the latter case, they
        // did not interfere with any use of 'load'.
        assert(LCA->dominates(store_block)
               || !LCA_orig->dominates(store_block), "no stray stores");
      }
    }
  }

  // Return the highest block containing stores; any stores
  // within that block have been given anti-dependence edges.
  return LCA;
}

// This class is used to iterate backwards over the nodes in the graph.
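// The walk is a post-order DFS over def->use edges: a node is returned
// only after all of its forward (non-backedge) uses have been returned,
// which is the order required by the backwards latency computation and
// by schedule_late.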
class Node_Backward_Iterator {

private:
  Node_Backward_Iterator();

public:
  // Constructor for the iterator
  Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg);

  // Postincrement operator to iterate over the nodes
  Node *next();

private:
  VectorSet &_visited;
  Node_List &_stack;
  PhaseCFG  &_cfg;
};

// Constructor for the Node_Backward_Iterator
Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg)
  : _visited(visited), _stack(stack), _cfg(cfg) {
  // The stack should contain exactly the root
  stack.clear();
  stack.push(root);

  // Clear the visited bits
  visited.Clear();
}

// Iterator for the Node_Backward_Iterator
Node *Node_Backward_Iterator::next() {

  // If the _stack is empty, then just return NULL: finished.
  if ( !_stack.size() )
    return NULL;

  // '_stack' is emulating a real _stack. The 'visit-all-users' loop has been
  // made stateless, so I do not need to record the index 'i' on my _stack.
  // Instead I visit all users each time, scanning for unvisited users.
  // I visit unvisited not-anti-dependence users first, then anti-dependent
  // children next.
  Node *self = _stack.pop();

  // I cycle here when I am entering a deeper level of recursion.
  // The key variable 'self' was set prior to jumping here.
  while( 1 ) {

    _visited.set(self->_idx);

    // Now schedule all uses as late as possible.
    const Node* src = self->is_Proj() ? self->in(0) : self;
    uint src_rpo = _cfg.get_block_for_node(src)->_rpo;

    // Schedule all nodes in a post-order visit
    Node *unvisited = NULL;  // Unvisited anti-dependent Node, if any

    // Scan for unvisited nodes
    for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
      // For all uses, schedule late
      Node* n = self->fast_out(i); // Use

      // Skip already visited children
      if ( _visited.test(n->_idx) )
        continue;

      // do not traverse backward control edges
      Node *use = n->is_Proj() ? n->in(0) : n;
      uint use_rpo = _cfg.get_block_for_node(use)->_rpo;

      if ( use_rpo < src_rpo )
        continue;

      // Phi nodes always precede uses in a basic block
      if ( use_rpo == src_rpo && use->is_Phi() )
        continue;

      unvisited = n;      // Found unvisited

      // Check for possible-anti-dependent
      if( !n->needs_anti_dependence_check() )
        break;            // Not visited, not anti-dep; schedule it NOW
    }

    // Did I find an unvisited not-anti-dependent Node?
    if ( !unvisited )
      break;                  // All done with children; post-visit 'self'

    // Visit the unvisited Node. Contains the obvious push to
    // indicate I'm entering a deeper level of recursion. I push the
    // old state onto the _stack and set a new state and loop (recurse).
    _stack.push(self);
    self = unvisited;
  } // End recursion loop

  return self;
}

//------------------------------ComputeLatenciesBackwards----------------------
// Compute the latency of all the instructions.
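// Each node's latency is the maximum over its uses of (use latency +
// edge latency); walking the graph in use-before-def order lets this be
// computed in a single backwards pass.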
void PhaseCFG::compute_latencies_backwards(VectorSet &visited, Node_List &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- ComputeLatenciesBackwards ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
  Node *n;

  // Walk over all the nodes from last to first
  while ((n = iter.next()) != NULL) {
    // Set the latency for the definitions of this instruction
    partial_latency_of_defs(n);
  }
} // end ComputeLatenciesBackwards

//------------------------------partial_latency_of_defs------------------------
// Compute the latency impact of this node on all defs. This computes
// a number that increases as we approach the beginning of the routine.
void PhaseCFG::partial_latency_of_defs(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_to_inputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
    dump();
  }
#endif

  if (n->is_Proj()) {
    n = n->in(0);
  }

  if (n->is_Root()) {
    return;
  }

  uint nlen = n->len();
  uint use_latency = get_latency_for_node(n);
  uint use_pre_order = get_block_for_node(n)->_pre_order;

  for (uint j = 0; j < nlen; j++) {
    Node *def = n->in(j);

    if (!def || def == n) {
      continue;
    }

    // Walk backwards through projections
    if (def->is_Proj()) {
      def = def->in(0);
    }

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("# in(%2d): ", j);
      def->dump();
    }
#endif

    // If the defining block is not known, assume it is ok
    Block *def_block = get_block_for_node(def);
    uint def_pre_order = def_block ? def_block->_pre_order : 0;

    if ((use_pre_order < def_pre_order) || (use_pre_order == def_pre_order && n->is_Phi())) {
      continue;
    }

    uint delta_latency = n->latency(j);
    uint current_latency = delta_latency + use_latency;

    if (get_latency_for_node(def) < current_latency) {
      set_latency_for_node(def, current_latency);
    }

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", use_latency, j, delta_latency, current_latency, def->_idx, get_latency_for_node(def));
    }
#endif
  }
}

//------------------------------latency_from_use-------------------------------
// Compute the latency of a specific use
int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
  // If self-reference, return no latency
  if (use == n || use->is_Root()) {
    return 0;
  }

  uint def_pre_order = get_block_for_node(def)->_pre_order;
  uint latency = 0;

  // If the use is not a projection, then it is simple...
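  // ...scan the use's inputs for 'n' and take the maximum of
  // (edge latency + the use's own latency) over all matching edges.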
  if (!use->is_Proj()) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("# out(): ");
      use->dump();
    }
#endif

    uint use_pre_order = get_block_for_node(use)->_pre_order;

    if (use_pre_order < def_pre_order)
      return 0;

    if (use_pre_order == def_pre_order && use->is_Phi())
      return 0;

    uint nlen = use->len();
    uint nl = get_latency_for_node(use);

    for ( uint j=0; j<nlen; j++ ) {
      if (use->in(j) == n) {
        // Change this if we want local latencies
        uint ul = use->latency(j);
        uint  l = ul + nl;
        if (latency < l) latency = l;
#ifndef PRODUCT
        if (trace_opto_pipelining()) {
          tty->print_cr("# %d + edge_latency(%d) == %d -> %d, latency = %d",
                        nl, j, ul, l, latency);
        }
#endif
      }
    }
  } else {
    // This is a projection, just grab the latency of the use(s)
    for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
      uint l = latency_from_use(use, def, use->fast_out(j));
      if (latency < l) latency = l;
    }
  }

  return latency;
}

//------------------------------latency_from_uses------------------------------
// Compute the latency of this instruction relative to all of its uses.
// This computes a number that increases as we approach the beginning of the
// routine.
void PhaseCFG::latency_from_uses(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_from_outputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
    dump();
  }
#endif
  uint latency=0;
  const Node *def = n->is_Proj() ? n->in(0): n;

  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    uint l = latency_from_use(n, def, n->fast_out(i));

    if (latency < l) latency = l;
  }

  set_latency_for_node(n, latency);
}

//------------------------------hoist_to_cheaper_block-------------------------
// Pick a block for node self, between early and LCA, that is a cheaper
// alternative to LCA.
Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
  const double delta = 1+PROB_UNLIKELY_MAG(4);
  Block* least       = LCA;
  double least_freq  = least->_freq;
  uint target        = get_latency_for_node(self);
  uint start_latency = get_latency_for_node(LCA->head());
  uint end_latency   = get_latency_for_node(LCA->get_node(LCA->end_idx()));
  bool in_latency    = (target <= start_latency);
  const Block* root_block = get_block_for_node(_root);

  // Turn off latency scheduling if scheduling is just plain off
  if (!C->do_scheduling())
    in_latency = true;

  // Do not hoist (to cover latency) instructions which target a
  // single register. Hoisting stretches the live range of the
  // single register and may force spilling.
  MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
  if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty())
    in_latency = true;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# Find cheaper block for latency %d: ", get_latency_for_node(self));
    self->dump();
    tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
      LCA->_pre_order,
      LCA->head()->_idx,
      start_latency,
      LCA->get_node(LCA->end_idx())->_idx,
      end_latency,
      least_freq);
  }
#endif

  int cand_cnt = 0;  // number of candidates tried

  // Walk up the dominator tree from LCA (Lowest common ancestor) to
  // the earliest legal location. Capture the least execution frequency.
  while (LCA != early) {
    LCA = LCA->_idom;         // Follow up the dominator tree

    if (LCA == NULL) {
      // Bailout without retry
      C->record_method_not_compilable("late schedule failed: LCA == NULL");
      return least;
    }

    // Don't hoist machine instructions to the root basic block
    if (mach && LCA == root_block)
      break;

    uint start_lat = get_latency_for_node(LCA->head());
    uint end_idx   = LCA->end_idx();
    uint end_lat   = get_latency_for_node(LCA->get_node(end_idx));
    double LCA_freq = LCA->_freq;
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
        LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq);
    }
#endif
    cand_cnt++;
    if (LCA_freq < least_freq               || // Better Frequency
        (StressGCM && Compile::randomized_select(cand_cnt)) || // Should be randomly accepted in stress mode
        (!StressGCM                         && // Otherwise, choose with latency
         !in_latency                        && // No block containing latency
         LCA_freq < least_freq * delta      && // No worse frequency
         target >= end_lat                  && // within latency range
         !self->is_iteratively_computed() )    // But don't hoist IV increments
             // because they may end up above other uses of their phi forcing
             // their result register to be different from their input.
       ) {
      least = LCA;            // Found cheaper block
      least_freq = LCA_freq;
      start_latency = start_lat;
      end_latency = end_lat;
      if (target <= start_lat)
        in_latency = true;
    }
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print_cr("# Choose block B%d with start latency=%d and freq=%g",
      least->_pre_order, start_latency, least_freq);
  }
#endif

  // See if the latency needs to be updated
  if (target < end_latency) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
    }
#endif
    set_latency_for_node(self, end_latency);
    partial_latency_of_defs(self);
  }

  return least;
}


//------------------------------schedule_late-----------------------------------
// Now schedule all codes as LATE as possible. This is the LCA in the
// dominator tree of all USES of a value. Pick the block with the least
// loop nesting depth that is lowest in the dominator tree.
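// A node may then be hoisted from that LCA toward its early block when a
// dominating block has a lower execution frequency (hoist_to_cheaper_block).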
extern const char must_clone[];
void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- schedule_late ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
  Node *self;

  // Walk over all the nodes from last to first
  while ((self = iter.next()) != NULL) {
    Block* early = get_block_for_node(self); // Earliest legal placement

    if (self->is_top()) {
      // Top node goes in bb #2 with other constants.
      // It must be special-cased, because it has no out edges.
      early->add_inst(self);
      continue;
    }

    // No uses, just terminate
    if (self->outcnt() == 0) {
      assert(self->is_MachProj(), "sanity");
      continue;                   // Must be a dead machine projection
    }

    // If node is pinned in the block, then no scheduling can be done.
    if( self->pinned() )          // Pinned in block?
      continue;

    MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
    if (mach) {
      switch (mach->ideal_Opcode()) {
      case Op_CreateEx:
        // Don't move exception creation
        early->add_inst(self);
        continue;
        break;
      case Op_CheckCastPP:
        // Don't move CheckCastPP nodes away from their input, if the input
        // is a rawptr (5071820).
        Node *def = self->in(1);
        if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
          early->add_inst(self);
#ifdef ASSERT
          _raw_oops.push(def);
#endif
          continue;
        }
        break;
      }
    }

    // Gather LCA of all uses
    Block *LCA = NULL;
    {
      for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
        // For all uses, find LCA
        Node* use = self->fast_out(i);
        LCA = raise_LCA_above_use(LCA, use, self, this);
      }
    }  // (Hide defs of imax, i from rest of block.)

    // Place temps in the block of their use. This isn't a
    // requirement for correctness but it reduces useless
    // interference between temps and other nodes.
    if (mach != NULL && mach->is_MachTemp()) {
      map_node_to_block(self, LCA);
      LCA->add_inst(self);
      continue;
    }

    // Check if 'self' could be anti-dependent on memory
    if (self->needs_anti_dependence_check()) {
      // Hoist LCA above possible-defs and insert anti-dependences to
      // defs in new LCA block.
      LCA = insert_anti_dependences(LCA, self);
    }

    if (early->_dom_depth > LCA->_dom_depth) {
      // Somehow the LCA has moved above the earliest legal point.
      // (One way this can happen is via memory_early_block.)
      if (C->subsume_loads() == true && !C->failing()) {
        // Retry with subsume_loads == false
        // If this is the first failure, the sentinel string will "stick"
        // to the Compile object, and the C2Compiler will see it and retry.
        C->record_failure(C2Compiler::retry_no_subsuming_loads());
      } else {
        // Bailout without retry when (early->_dom_depth > LCA->_dom_depth)
        C->record_method_not_compilable("late schedule failed: incorrect graph");
      }
      return;
    }

    // If there is no opportunity to hoist, then we're done.
    // In stress mode, try to hoist even the single operations.
    bool try_to_hoist = StressGCM || (LCA != early);

    // Must clone guys stay next to use; no hoisting allowed.
    // Also cannot hoist guys that alter memory or are otherwise not
    // allocatable (hoisting can make a value live longer, leading to
    // anti and output dependency problems which are normally resolved
    // by the register allocator giving everyone a different register).
    if (mach != NULL && must_clone[mach->ideal_Opcode()])
      try_to_hoist = false;

    Block* late = NULL;
    if (try_to_hoist) {
      // Now find the block with the least execution frequency.
      // Start at the latest schedule and work up to the earliest schedule
      // in the dominator tree. Thus the Node will dominate all its uses.
      late = hoist_to_cheaper_block(LCA, early, self);
    } else {
      // Just use the LCA of the uses.
      late = LCA;
    }

    // Put the node into target block
    schedule_node_into_block(self, late);

#ifdef ASSERT
    if (self->needs_anti_dependence_check()) {
      // since precedence edges are only inserted when we're sure they
      // are needed make sure that after placement in a block we don't
      // need any new precedence edges.
      verify_anti_dependences(late, self);
    }
#endif
  } // Loop until all nodes have been visited

} // end ScheduleLate

//------------------------------GlobalCodeMotion-------------------------------
void PhaseCFG::global_code_motion() {
  ResourceMark rm;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Start GlobalCodeMotion ----\n");
  }
#endif

  // Initialize the node to block mapping for things on the proj_list
  for (uint i = 0; i < _matcher.number_of_projections(); i++) {
    unmap_node_from_block(_matcher.get_projection(i));
  }

  // Set the basic block for Nodes pinned into blocks
  Arena* arena = Thread::current()->resource_area();
  VectorSet visited(arena);
  schedule_pinned_nodes(visited);

  // Find the earliest Block any instruction can be placed in. Some
  // instructions are pinned into Blocks. Unpinned instructions can
  // appear in last block in which all their inputs occur.
  visited.Clear();
  Node_List stack(arena);
  // Pre-grow the list
  stack.map((C->live_nodes() >> 1) + 16, NULL);
  if (!schedule_early(visited, stack)) {
    // Bailout without retry
    C->record_method_not_compilable("early schedule failed");
    return;
  }

  // Build Def-Use edges.
  // Compute the latency information (via backwards walk) for all the
  // instructions in the graph
  _node_latency = new GrowableArray<uint>(); // resource_area allocation

  if (C->do_scheduling()) {
    compute_latencies_backwards(visited, stack);
  }

  // Now schedule all codes as LATE as possible. This is the LCA in the
  // dominator tree of all USES of a value. Pick the block with the least
  // loop nesting depth that is lowest in the dominator tree.
  // ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() )
  schedule_late(visited, stack);
  if (C->failing()) {
    // schedule_late fails only when graph is incorrect.
    assert(!VerifyGraphEdges, "verification should have failed");
    return;
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Detect implicit null checks ----\n");
  }
#endif

  // Detect implicit-null-check opportunities. Basically, find NULL checks
  // with suitable memory ops nearby. Use the memory op to do the NULL check.
  // I can generate a memory op if there is not one nearby.
  if (C->is_method_compilation()) {
    // By reversing the loop direction we get a very minor gain on mpegaudio.
    // Feel free to revert to a forward loop for clarity.
    // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
    for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) {
      Node* proj = _matcher._null_check_tests[i];
      Node* val  = _matcher._null_check_tests[i + 1];
      Block* block = get_block_for_node(proj);
      implicit_null_check(block, proj, val, C->allowed_deopt_reasons());
      // The implicit_null_check will only perform the transformation
      // if the null branch is truly uncommon, *and* it leads to an
      // uncommon trap. Combined with the too_many_traps guards
      // above, this prevents SEGV storms reported in 6366351,
      // by recompiling offending methods without this optimization.
    }
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Start Local Scheduling ----\n");
  }
#endif

  // Schedule locally. Right now a simple topological sort.
  // Later, do a real latency aware scheduler.
  GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1);
  visited.Clear();
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    if (!schedule_local(block, ready_cnt, visited)) {
      if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
        C->record_method_not_compilable("local schedule failed");
      }
      return;
    }
  }

  // If we inserted any instructions between a Call and its CatchNode,
  // clone the instructions on all paths below the Catch.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    call_catch_cleanup(block);
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- After GlobalCodeMotion ----\n");
    for (uint i = 0; i < number_of_blocks(); i++) {
      Block* block = get_block(i);
      block->dump();
    }
  }
#endif
  // Dead.
  _node_latency = (GrowableArray<uint> *)0xdeadbeef;
}

bool PhaseCFG::do_global_code_motion() {

  build_dominator_tree();
  if (C->failing()) {
    return false;
  }

  NOT_PRODUCT( C->verify_graph_edges(); )

  estimate_block_frequency();

  global_code_motion();

  if (C->failing()) {
    return false;
  }

  return true;
}

//------------------------------Estimate_Block_Frequency-----------------------
// Estimate block frequencies based on IfNode probabilities.
void PhaseCFG::estimate_block_frequency() {

  // Force conditional branches leading to uncommon traps to be unlikely,
  // not because we get to the uncommon_trap with less relative frequency,
  // but because an uncommon_trap typically causes a deopt, so we only get
  // there once.
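  // Walk backwards from blocks containing uncommon-trap code, through
  // single-successor predecessors, adjusting branch probabilities at the
  // first two-way branch found.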
  if (C->do_freq_based_layout()) {
    Block_List worklist;
    Block* root_blk = get_block(0);
    for (uint i = 1; i < root_blk->num_preds(); i++) {
      Block *pb = get_block_for_node(root_blk->pred(i));
      if (pb->has_uncommon_code()) {
        worklist.push(pb);
      }
    }
    while (worklist.size() > 0) {
      Block* uct = worklist.pop();
      if (uct == get_root_block()) {
        continue;
      }
      for (uint i = 1; i < uct->num_preds(); i++) {
        Block *pb = get_block_for_node(uct->pred(i));
        if (pb->_num_succs == 1) {
          worklist.push(pb);
        } else if (pb->num_fall_throughs() == 2) {
          pb->update_uncommon_branch(uct);
        }
      }
    }
  }

  // Create the loop tree and calculate loop depth.
  _root_loop = create_loop_tree();
  _root_loop->compute_loop_depth(0);

  // Compute block frequency of each block, relative to a single loop entry.
  _root_loop->compute_freq();

  // Adjust all frequencies to be relative to a single method entry
  _root_loop->_freq = 1.0;
  _root_loop->scale_freq();

  // Save outermost loop frequency for LRG frequency threshold
  _outer_loop_frequency = _root_loop->outer_loop_freq();

  // force paths ending at uncommon traps to be infrequent
  if (!C->do_freq_based_layout()) {
    Block_List worklist;
    Block* root_blk = get_block(0);
    for (uint i = 1; i < root_blk->num_preds(); i++) {
      Block *pb = get_block_for_node(root_blk->pred(i));
      if (pb->has_uncommon_code()) {
        worklist.push(pb);
      }
    }
    while (worklist.size() > 0) {
      Block* uct = worklist.pop();
      uct->_freq = PROB_MIN;
      for (uint i = 1; i < uct->num_preds(); i++) {
        Block *pb = get_block_for_node(uct->pred(i));
        if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
          worklist.push(pb);
        }
      }
    }
  }

#ifdef ASSERT
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* b = get_block(i);
    assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency");
  }
#endif

#ifndef PRODUCT
  if (PrintCFGBlockFreq) {
    tty->print_cr("CFG Block Frequencies");
    _root_loop->dump_tree();
    if (Verbose) {
      tty->print_cr("PhaseCFG dump");
      dump();
      tty->print_cr("Node dump");
      _root->dump(99999);
    }
  }
#endif
}

//----------------------------create_loop_tree--------------------------------
// Create a loop tree from the CFG
CFGLoop* PhaseCFG::create_loop_tree() {

#ifdef ASSERT
  assert(get_block(0) == get_root_block(), "first block should be root block");
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    // Check that _loop fields are clear...we could clear them if not.
    assert(block->_loop == NULL, "clear _loop expected");
    // Sanity check that the RPO numbering is reflected in the _blocks array.

//------------------------------create_loop_tree--------------------------------
// Create a loop tree from the CFG
CFGLoop* PhaseCFG::create_loop_tree() {

#ifdef ASSERT
  assert(get_block(0) == get_root_block(), "first block should be root block");
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    // Check that the _loop fields are clear...we could clear them if not.
    assert(block->_loop == NULL, "clear _loop expected");
    // Sanity check that the RPO numbering is reflected in the _blocks array.
    // It does not have to be for the loop tree to be built, but if it is not,
    // then the blocks have been reordered since dominator-tree construction,
    // which casts doubt on the RPO numbering.
    assert(block->_rpo == i, "unexpected reverse post order number");
  }
#endif

  int idct = 0;
  CFGLoop* root_loop = new CFGLoop(idct++);

  Block_List worklist;

  // Assign blocks to loops
  for (uint i = number_of_blocks() - 1; i > 0; i--) { // skip Root block
    Block* block = get_block(i);

    if (block->head()->is_Loop()) {
      Block* loop_head = block;
      assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
      Block* tail = get_block_for_node(tail_n);

      // Defensively filter out Loop nodes for non-single-entry loops.
      // For all reasonable loops, the head occurs before the tail in RPO.
      if (i <= tail->_rpo) {

        // The tail and (recursive) predecessors of the tail
        // are made members of a new loop.

        assert(worklist.size() == 0, "nonempty worklist");
        CFGLoop* nloop = new CFGLoop(idct++);
        assert(loop_head->_loop == NULL, "just checking");
        loop_head->_loop = nloop;
        // Add to nloop so push_pred() will skip over inner loops
        nloop->add_member(loop_head);
        nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this);

        while (worklist.size() > 0) {
          Block* member = worklist.pop();
          if (member != loop_head) {
            for (uint j = 1; j < member->num_preds(); j++) {
              nloop->push_pred(member, j, worklist, this);
            }
          }
        }
      }
    }
  }

  // Create a member list for each loop consisting
  // of both blocks and (immediate child) loops.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    CFGLoop* lp = block->_loop;
    if (lp == NULL) {
      // Not assigned to a loop. Add it to the method's pseudo loop.
      block->_loop = root_loop;
      lp = root_loop;
    }
    if (lp == root_loop || block != lp->head()) { // loop heads are already members
      lp->add_member(block);
    }
    if (lp != root_loop) {
      if (lp->parent() == NULL) {
        // Not a nested loop. Make it a child of the method's pseudo loop.
        root_loop->add_nested_loop(lp);
      }
      if (block == lp->head()) {
        // Add nested loop to member list of parent loop.
        lp->parent()->add_member(lp);
      }
    }
  }

  return root_loop;
}
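
// For example, a method containing a doubly-nested loop yields a tree of
// the shape
//
//   root_loop (the method's pseudo loop)
//     +-- outer loop
//           +-- inner loop
//
// where each block is a member of its innermost enclosing loop and each
// nested loop also appears on its parent's member list.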

//------------------------------push_pred--------------------------------------
void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) {
  Node* pred_n = blk->pred(i);
  Block* pred = cfg->get_block_for_node(pred_n);
  CFGLoop* pred_loop = pred->_loop;
  if (pred_loop == NULL) {
    // Filter out blocks for non-single-entry loops.
    // For all reasonable loops, the head occurs before the tail in RPO.
    if (pred->_rpo > head()->_rpo) {
      pred->_loop = this;
      worklist.push(pred);
    }
  } else if (pred_loop != this) {
    // Nested loop.
    while (pred_loop->_parent != NULL && pred_loop->_parent != this) {
      pred_loop = pred_loop->_parent;
    }
    // Make pred's loop a child.
    if (pred_loop->_parent == NULL) {
      add_nested_loop(pred_loop);
      // Continue with the loop entry predecessor.
      Block* pred_head = pred_loop->head();
      assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      assert(pred_head != head(), "loop head in only one loop");
      push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
    } else {
      assert(pred_loop->_parent == this && _parent == NULL, "just checking");
    }
  }
}

//------------------------------add_nested_loop--------------------------------
// Make cl a child of the current loop in the loop tree.
void CFGLoop::add_nested_loop(CFGLoop* cl) {
  assert(_parent == NULL, "no parent yet");
  assert(cl != this, "not my own parent");
  cl->_parent = this;
  CFGLoop* ch = _child;
  if (ch == NULL) {
    _child = cl;
  } else {
    while (ch->_sibling != NULL) { ch = ch->_sibling; }
    ch->_sibling = cl;
  }
}

//------------------------------compute_loop_depth-----------------------------
// Store the loop depth in each CFGLoop object.
// Recursively walk the children to do the same for them.
void CFGLoop::compute_loop_depth(int depth) {
  _depth = depth;
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->compute_loop_depth(depth + 1);
    ch = ch->_sibling;
  }
}

//------------------------------compute_freq-----------------------------------
// Compute the frequency of each block and loop, relative to a single entry
// into the dominating loop head.
void CFGLoop::compute_freq() {
  // Bottom-up traversal of the loop tree (visit inner loops first).
  // Set loop head frequency to 1.0, then transitively
  // compute frequency for all successors in the loop,
  // as well as for each exit edge.  Inner loops are
  // treated as single blocks with loop exit targets
  // as the successor blocks.

  // Nested loops first
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->compute_freq();
    ch = ch->_sibling;
  }
  assert(_members.length() > 0, "no empty loops");
  Block* hd = head();
  hd->_freq = 1.0;
  for (int i = 0; i < _members.length(); i++) {
    CFGElement* s = _members.at(i);
    double freq = s->_freq;
    if (s->is_block()) {
      Block* b = s->as_Block();
      for (uint j = 0; j < b->_num_succs; j++) {
        Block* sb = b->_succs[j];
        update_succ_freq(sb, freq * b->succ_prob(j));
      }
    } else {
      CFGLoop* lp = s->as_CFGLoop();
      assert(lp->_parent == this, "immediate child");
      for (int k = 0; k < lp->_exits.length(); k++) {
        Block* eb = lp->_exits.at(k).get_target();
        double prob = lp->_exits.at(k).get_prob();
        update_succ_freq(eb, freq * prob);
      }
    }
  }

  // For all loops other than the outer, "method" loop,
  // sum and normalize the exit probability. The "method" loop
  // should keep the initial exit probability of 1, so that
  // inner blocks do not get erroneously scaled.
  if (_depth != 0) {
    // Total the exit probabilities for this loop.
    double exits_sum = 0.0;
    for (int i = 0; i < _exits.length(); i++) {
      exits_sum += _exits.at(i).get_prob();
    }

    // Normalize the exit probabilities. Until now, the
    // probabilities estimate the likelihood of exit on a
    // single loop iteration; afterward, they estimate
    // the probability of exit per loop entry.
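    // For instance (illustrative numbers): a loop with two exits whose
    // per-iteration probabilities are 0.01 and 0.03 has exits_sum == 0.04;
    // normalization rescales them to 0.25 and 0.75, and the saved
    // _exit_prob implies an estimated trip count of roughly 1/0.04 == 25.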
    for (int i = 0; i < _exits.length(); i++) {
      Block* et = _exits.at(i).get_target();
      float new_prob = 0.0f;
      if (_exits.at(i).get_prob() > 0.0f) {
        new_prob = _exits.at(i).get_prob() / exits_sum;
      }
      BlockProbPair bpp(et, new_prob);
      _exits.at_put(i, bpp);
    }

    // Save the total, but guard against an unreasonable probability,
    // as the value is used to estimate the loop trip count.
    // An infinite trip count would blur relative block
    // frequencies.
    if (exits_sum > 1.0) exits_sum = 1.0;
    if (exits_sum < PROB_MIN) exits_sum = PROB_MIN;
    _exit_prob = exits_sum;
  }
}
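
// Note: succ_prob() below supplies the per-edge probabilities that
// compute_freq() consumes via update_succ_freq(), distributing each
// block's frequency across its successors.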

//------------------------------succ_prob-------------------------------------
// Determine the probability of reaching successor 'i' from the receiver block.
float Block::succ_prob(uint i) {
  int eidx = end_idx();
  Node* n = get_node(eidx); // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // Can only reach here if called after lcm. The original Op_If is gone,
      // so we attempt to infer the probability from one or both of the
      // successor blocks.
      assert(_num_succs == 2, "expecting 2 successors of a null check");
      // If either successor has only one predecessor, then the
      // probability estimate can be derived using the
      // relative frequency of the successor and this block.
      if (_succs[i]->num_preds() == 2) {
        return _succs[i]->_freq / _freq;
      } else if (_succs[1-i]->num_preds() == 2) {
        return 1 - (_succs[1-i]->_freq / _freq);
      } else {
        // Estimate using both successor frequencies
        float freq = _succs[i]->_freq;
        return freq / (freq + _succs[1-i]->_freq);
      }
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch (op) {
  case Op_CountedLoopEnd:
  case Op_If: {
    assert(i < 2, "just checking");
    // Conditionals pass on only part of their frequency
    float prob = n->as_MachIf()->_prob;
    assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
    // If succ[i] is the FALSE branch, invert path info
    if (get_node(i + eidx + 1)->Opcode() == Op_IfFalse) {
      return 1.0f - prob; // not taken
    } else {
      return prob; // taken
    }
  }

  case Op_Jump:
    // Divide the frequency between all successors evenly
    return 1.0f / _num_succs;

  case Op_Catch: {
    const CatchProjNode* ci = get_node(i + eidx + 1)->as_CatchProj();
    if (ci->_con == CatchProjNode::fall_through_index) {
      // Fall-thru path gets the lion's share.
      return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
    } else {
      // Presume exceptional paths are equally unlikely
      return PROB_UNLIKELY_MAG(5);
    }
  }

  case Op_Root:
  case Op_Goto:
    // Pass frequency straight thru to target
    return 1.0f;

  case Op_NeverBranch:
    return 0.0f;

  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    // Do not push out freq to root block
    return 0.0f;

  default:
    ShouldNotReachHere();
  }

  return 0.0f;
}

//------------------------------num_fall_throughs-----------------------------
// Return the number of fall-through candidates for a block
int Block::num_fall_throughs() {
  int eidx = end_idx();
  Node* n = get_node(eidx); // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // In theory, either side can fall through; for simplicity's sake,
      // let's say only the false branch can now.
      return 1;
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch (op) {
  case Op_CountedLoopEnd:
  case Op_If:
    return 2;

  case Op_Root:
  case Op_Goto:
    return 1;

  case Op_Catch: {
    for (uint i = 0; i < _num_succs; i++) {
      const CatchProjNode* ci = get_node(i + eidx + 1)->as_CatchProj();
      if (ci->_con == CatchProjNode::fall_through_index) {
        return 1;
      }
    }
    return 0;
  }

  case Op_Jump:
  case Op_NeverBranch:
  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    return 0;

  default:
    ShouldNotReachHere();
  }

  return 0;
}

//------------------------------succ_fall_through-----------------------------
// Return true if a specific successor could be a fall-through target.
bool Block::succ_fall_through(uint i) {
  int eidx = end_idx();
  Node* n = get_node(eidx); // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // In theory, either side can fall through; for simplicity's sake,
      // let's say only the false branch can now.
      return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch (op) {
  case Op_CountedLoopEnd:
  case Op_If:
  case Op_Root:
  case Op_Goto:
    return true;

  case Op_Catch: {
    const CatchProjNode* ci = get_node(i + eidx + 1)->as_CatchProj();
    return ci->_con == CatchProjNode::fall_through_index;
  }

  case Op_Jump:
  case Op_NeverBranch:
  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    return false;

  default:
    ShouldNotReachHere();
  }

  return false;
}
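
// Note: update_uncommon_branch() is called from estimate_block_frequency()
// above (when frequency-based layout is enabled) to clamp the probability
// of reaching an uncommon-trap successor down to PROB_MIN.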

//------------------------------update_uncommon_branch------------------------
// Update the probability of a two-way branch so that the path to 'ub'
// becomes uncommon.
void Block::update_uncommon_branch(Block* ub) {
  int eidx = end_idx();
  Node* n = get_node(eidx); // Get ending Node

  int op = n->as_Mach()->ideal_Opcode();

  assert(op == Op_CountedLoopEnd || op == Op_If, "must be an If");
  assert(num_fall_throughs() == 2, "must be a two-way branch block");

  // Which successor is ub?
  uint s;
  for (s = 0; s < _num_succs; s++) {
    if (_succs[s] == ub) break;
  }
  assert(s < 2, "uncommon successor must be found");

  // If ub is on the true path, make the probability small; if ub is on
  // the false path, make the probability large.
  bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);

  // Get existing probability
  float p = n->as_MachIf()->_prob;

  if (invert) p = 1.0 - p;
  if (p > PROB_MIN) {
    p = PROB_MIN;
  }
  if (invert) p = 1.0 - p;

  n->as_MachIf()->_prob = p;
}

//------------------------------update_succ_freq-------------------------------
// Update the appropriate frequency associated with block 'b', a successor of
// a block in this loop.
void CFGLoop::update_succ_freq(Block* b, double freq) {
  if (b->_loop == this) {
    if (b == head()) {
      // Back branch within the loop.
      // Do nothing now; the loop-carried frequency will be
      // adjusted later in scale_freq().
    } else {
      // Simple branch within the loop.
      b->_freq += freq;
    }
  } else if (!in_loop_nest(b)) {
    // The branch is an exit from this loop.
    BlockProbPair bpp(b, freq);
    _exits.append(bpp);
  } else {
    // The branch is into a nested loop.
    CFGLoop* ch = b->_loop;
    ch->_freq += freq;
  }
}

//------------------------------in_loop_nest-----------------------------------
// Determine if block b is in the receiver's loop nest.
bool CFGLoop::in_loop_nest(Block* b) {
  int depth = _depth;
  CFGLoop* b_loop = b->_loop;
  int b_depth = b_loop->_depth;
  if (depth == b_depth) {
    return true;
  }
  while (b_depth > depth) {
    b_loop = b_loop->_parent;
    b_depth = b_loop->_depth;
  }
  return b_loop == this;
}

//------------------------------scale_freq-------------------------------------
// Scale the frequency of loops and blocks by trip counts from outer loops.
// Do a top-down traversal of the loop tree (visit outer loops first).
void CFGLoop::scale_freq() {
  double loop_freq = _freq * trip_count();
  _freq = loop_freq;
  for (int i = 0; i < _members.length(); i++) {
    CFGElement* s = _members.at(i);
    double block_freq = s->_freq * loop_freq;
    if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
      block_freq = MIN_BLOCK_FREQUENCY;
    s->_freq = block_freq;
  }
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->scale_freq();
    ch = ch->_sibling;
  }
}
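
// Illustrative example (assumed round numbers): if an outer loop has an
// estimated trip count of 10 and contains an inner loop with a trip count
// of 5, scale_freq() multiplies the outer loop's members by 10 and then the
// inner loop's members by its already-scaled loop frequency, leaving
// inner-loop blocks at roughly 50 executions per method entry.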
"Method" : "Loop", _id, trip_count(), _freq); 2033 for (int i = 0; i < _depth; i++) tty->print(" "); 2034 tty->print(" members:"); 2035 int k = 0; 2036 for (int i = 0; i < _members.length(); i++) { 2037 if (k++ >= 6) { 2038 tty->print("\n "); 2039 for (int j = 0; j < _depth+1; j++) tty->print(" "); 2040 k = 0; 2041 } 2042 CFGElement *s = _members.at(i); 2043 if (s->is_block()) { 2044 Block *b = s->as_Block(); 2045 tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq); 2046 } else { 2047 CFGLoop* lp = s->as_CFGLoop(); 2048 tty->print(" L%d(%6.3f)", lp->_id, lp->_freq); 2049 } 2050 } 2051 tty->print("\n"); 2052 for (int i = 0; i < _depth; i++) tty->print(" "); 2053 tty->print(" exits: "); 2054 k = 0; 2055 for (int i = 0; i < _exits.length(); i++) { 2056 if (k++ >= 7) { 2057 tty->print("\n "); 2058 for (int j = 0; j < _depth+1; j++) tty->print(" "); 2059 k = 0; 2060 } 2061 Block *blk = _exits.at(i).get_target(); 2062 double prob = _exits.at(i).get_prob(); 2063 tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100)); 2064 } 2065 tty->print("\n"); 2066 } 2067 #endif