/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/machnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/chaitin.hpp"
#include "runtime/deoptimization.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

// To avoid float value underflow
#define MIN_BLOCK_FREQUENCY 1.e-35f

//----------------------------schedule_node_into_block-------------------------
// Insert node n into block b. Look for projections of n and make sure they
// are in b also.
void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
  // Set basic block of n and add n to b.
  map_node_to_block(n, b);
  b->add_inst(n);

  // After Matching, nearly any old Node may have projections trailing it.
  // These are usually machine-dependent flags.  In any case, they might
  // float to another block below this one.  Move them up.
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* use = n->fast_out(i);
    if (use->is_Proj()) {
      Block* buse = get_block_for_node(use);
      if (buse != b) {              // In wrong block?
        if (buse != NULL) {
          buse->find_remove(use);   // Remove from wrong block
        }
        map_node_to_block(use, b);
        b->add_inst(use);
      }
    }
  }
}

//----------------------------replace_block_proj_ctrl-------------------------
// Nodes that have is_block_proj() nodes as their control need to use
// the appropriate Region for their actual block as their control since
// the projection will be in a predecessor block.
void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
  const Node *in0 = n->in(0);
  assert(in0 != NULL, "Only control-dependent");
  const Node *p = in0->is_block_proj();
  if (p != NULL && p != n) {    // Control from a block projection?
    assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
    // Find trailing Region
    Block *pb = get_block_for_node(in0); // Block-projection already has basic block
    uint j = 0;
    if (pb->_num_succs != 1) {  // More than 1 successor?
      // Search for successor
      uint max = pb->number_of_nodes();
      assert( max > 1, "" );
      uint start = max - pb->_num_succs;
      // Find which output path belongs to projection
      for (j = start; j < max; j++) {
        if( pb->get_node(j) == in0 )
          break;
      }
      assert( j < max, "must find" );
      // Change control to match head of successor basic block
      j -= start;
    }
    n->set_req(0, pb->_succs[j]->head());
  }
}

// Return true if dom_node dominates node.  Trivially true for the same node;
// when both live in the same block the decision is made from the node kinds
// (block start vs. block projection); otherwise the dominator tree decides.
bool PhaseCFG::is_dominator(Node* dom_node, Node* node) {
  if (dom_node == node) {
    return true;
  }
  Block* d = get_block_for_node(dom_node);
  Block* n = get_block_for_node(node);
  if (d == n) {
    if (dom_node->is_block_start()) {
      return true;
    }
    if (node->is_block_start()) {
      return false;
    }
    if (dom_node->is_block_proj()) {
      return false;
    }
    if (node->is_block_proj()) {
      return true;
    }
#ifdef ASSERT
    node->dump();
    dom_node->dump();
#endif
    fatal("unhandled");
    return false;
  }
  return d->dom_lca(n) == d;
}

//------------------------------schedule_pinned_nodes--------------------------
// Set the basic block for Nodes pinned into blocks
void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
  // Allocate node stack of size C->live_nodes()+8 to avoid frequent realloc
  GrowableArray <Node *> spstack(C->live_nodes() + 8);
  spstack.push(_root);
  while (spstack.is_nonempty()) {
    Node* node = spstack.pop();
    if (!visited.test_set(node->_idx)) { // Test node and flag it as visited
      if (node->pinned() && !has_block(node)) {  // Pinned?  Nail it down!
        assert(node->in(0), "pinned Node must have Control");
        // Before setting block replace block_proj control edge
        replace_block_proj_ctrl(node);
        Node* input = node->in(0);
        while (!input->is_block_start()) {
          input = input->in(0);
        }
        Block* block = get_block_for_node(input); // Basic block of controlling input
        schedule_node_into_block(node, block);
      }

      // If the node has precedence edges (added when CastPP nodes are
      // removed in final_graph_reshaping), fix the control of the
      // node to cover the precedence edges and remove the
      // dependencies.
      Node* n = NULL;
      for (uint i = node->len()-1; i >= node->req(); i--) {
        Node* m = node->in(i);
        if (m == NULL) continue;
        // Skip the precedence edge if the test that guarded a CastPP:
        // - was optimized out during escape analysis
        //   (OptimizePtrCompare): the CastPP's control isn't an end of
        //   block.
        // - is moved in the branch of a dominating If: the control of
        //   the CastPP is then a Region.
        if (m->is_block_proj() || m->is_block_start()) {
          node->rm_prec(i);
          if (n == NULL) {
            n = m;
          } else {
            assert(is_dominator(n, m) || is_dominator(m, n), "one must dominate the other");
            n = is_dominator(n, m) ? m : n;
          }
        }
      }
      if (n != NULL) {
        assert(node->in(0), "control should have been set");
        assert(is_dominator(n, node->in(0)) || is_dominator(node->in(0), n), "one must dominate the other");
        if (!is_dominator(n, node->in(0))) {
          node->set_req(0, n);
        }
      }

      // Process all inputs that are non-NULL
      for (int i = node->req() - 1; i >= 0; --i) {
        if (node->in(i) != NULL) {
          spstack.push(node->in(i));
        }
      }
    }
  }
}

#ifdef ASSERT
// Assert that new input b2 is dominated by all previous inputs.
// Check this by seeing that it is dominated by b1, the deepest
// input observed until b2.
static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
  if (b1 == NULL)  return;
  assert(b1->_dom_depth < b2->_dom_depth, "sanity");
  Block* tmp = b2;
  while (tmp != b1 && tmp != NULL) {
    tmp = tmp->_idom;
  }
  if (tmp != b1) {
    // Detected an unschedulable graph.  Print some nice stuff and die.
    tty->print_cr("!!! Unschedulable graph !!!");
    for (uint j=0; j<n->len(); j++) { // For all inputs
      Node* inn = n->in(j); // Get input
      if (inn == NULL)  continue;  // Ignore NULL, missing inputs
      Block* inb = cfg->get_block_for_node(inn);
      tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
                 inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
      inn->dump();
    }
    tty->print("Failing node: ");
    n->dump();
    assert(false, "unschedulable graph");
  }
}
#endif

static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
  // Find the last input dominated by all other inputs.
  Block* deepb           = NULL;        // Deepest block so far
  int    deepb_dom_depth = 0;
  for (uint k = 0; k < n->len(); k++) { // For all inputs
    Node* inn = n->in(k);               // Get input
    if (inn == NULL)  continue;         // Ignore NULL, missing inputs
    Block* inb = cfg->get_block_for_node(inn);
    assert(inb != NULL, "must already have scheduled this input");
    if (deepb_dom_depth < (int) inb->_dom_depth) {
      // The new inb must be dominated by the previous deepb.
      // The various inputs must be linearly ordered in the dom
      // tree, or else there will not be a unique deepest block.
      DEBUG_ONLY(assert_dom(deepb, inb, n, cfg));
      deepb = inb;                      // Save deepest block
      deepb_dom_depth = deepb->_dom_depth;
    }
  }
  assert(deepb != NULL, "must be at least one input to n");
  return deepb;
}


//------------------------------schedule_early---------------------------------
// Find the earliest Block any instruction can be placed in.  Some instructions
// are pinned into Blocks.  Unpinned instructions can appear in the last block
// in which all their inputs occur.
bool PhaseCFG::schedule_early(VectorSet &visited, Node_Stack &roots) {
  // Allocate stack with enough space to avoid frequent realloc
  Node_Stack nstack(roots.size() + 8);
  // _root will be processed among C->top() inputs
  roots.push(C->top(), 0);
  visited.set(C->top()->_idx);

  while (roots.size() != 0) {
    // Use local variables nstack_top_n & nstack_top_i to cache values
    // on stack's top.
    Node* parent_node = roots.node();
    uint  input_index = 0;
    roots.pop();

    while (true) {
      if (input_index == 0) {
        // Fixup some control.  Constants without control get attached
        // to root and nodes that use is_block_proj() nodes should be attached
        // to the region that starts their block.
        const Node* control_input = parent_node->in(0);
        if (control_input != NULL) {
          replace_block_proj_ctrl(parent_node);
        } else {
          // Is a constant with NO inputs?
          if (parent_node->req() == 1) {
            parent_node->set_req(0, _root);
          }
        }
      }

      // First, visit all inputs and force them to get a block.  If an
      // input is already in a block we quit following inputs (to avoid
      // cycles). Instead we put that Node on a worklist to be handled
      // later (since its inputs may not have a block yet).
      // Assume all n's inputs will be processed
      bool done = true;

      while (input_index < parent_node->len()) {
        Node* in = parent_node->in(input_index++);
        if (in == NULL) {
          continue;
        }

        int is_visited = visited.test_set(in->_idx);
        if (!has_block(in)) {
          if (is_visited) {
            assert(false, "graph should be schedulable");
            return false;
          }
          // Save parent node and next input's index.
          nstack.push(parent_node, input_index);
          // Process current input now.
          parent_node = in;
          input_index = 0;
          // Not all n's inputs processed.
          done = false;
          break;
        } else if (!is_visited) {
          // Visit this guy later, using worklist
          roots.push(in, 0);
        }
      }

      if (done) {
        // All of n's inputs have been processed, complete post-processing.

        // Some instructions are pinned into a block.  These include Region,
        // Phi, Start, Return, and other control-dependent instructions and
        // any projections which depend on them.
        if (!parent_node->pinned()) {
          // Set earliest legal block.
          Block* earliest_block = find_deepest_input(parent_node, this);
          map_node_to_block(parent_node, earliest_block);
        } else {
          assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge");
        }

        if (nstack.is_empty()) {
          // Finished all nodes on stack.
          // Process next node on the worklist 'roots'.
          break;
        }
        // Get saved parent node and next input's index.
        parent_node = nstack.node();
        input_index = nstack.index();
        nstack.pop();
      }
    }
  }
  return true;
}

//------------------------------dom_lca----------------------------------------
// Find least common ancestor in dominator tree.
// LCA is a current notion of LCA, to be raised above 'this'.
// As a convenient boundary condition, return 'this' if LCA is NULL.
// Find the LCA of those two nodes.
Block* Block::dom_lca(Block* LCA) {
  if (LCA == NULL || LCA == this)  return this;

  Block* anc = this;
  while (anc->_dom_depth > LCA->_dom_depth)
    anc = anc->_idom;           // Walk up till anc is as high as LCA

  while (LCA->_dom_depth > anc->_dom_depth)
    LCA = LCA->_idom;           // Walk up till LCA is as high as anc

  while (LCA != anc) {          // Walk both up till they are the same
    LCA = LCA->_idom;
    anc = anc->_idom;
  }

  return LCA;
}

//--------------------------raise_LCA_above_use--------------------------------
// We are placing a definition, and have been given a def->use edge.
// The definition must dominate the use, so move the LCA upward in the
// dominator tree to dominate the use.  If the use is a phi, adjust
// the LCA only with the phi input paths which actually use this def.
static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
  Block* buse = cfg->get_block_for_node(use);
  if (buse == NULL) return LCA;   // Unused killing Projs have no use block
  if (!use->is_Phi())  return buse->dom_lca(LCA);
  uint pmax = use->req();       // Number of Phi inputs
  // Why doesn't this loop just break after finding the matching input to
  // the Phi?  Well...it's like this.  I do not have true def-use/use-def
  // chains.  Means I cannot distinguish, from the def-use direction, which
  // of many use-defs lead from the same use to the same def.  That is, this
  // Phi might have several uses of the same def.
  // Each use appears in a different predecessor block.  But when I enter
  // here, I cannot distinguish which use-def edge I should find the
  // predecessor block for.  So I find them all.  Means I do a little extra
  // work if a Phi uses the same value more than once.
  for (uint j=1; j<pmax; j++) { // For all inputs
    if (use->in(j) == def) {    // Found matching input?
      Block* pred = cfg->get_block_for_node(buse->pred(j));
      LCA = pred->dom_lca(LCA);
    }
  }
  return LCA;
}

//----------------------------raise_LCA_above_marks----------------------------
// Return a new LCA that dominates LCA and any of its marked predecessors.
// Search all my parents up to 'early' (exclusive), looking for predecessors
// which are marked with the given index.  Return the LCA (in the dom tree)
// of all marked blocks.  If there are none marked, return the original
// LCA.
static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
  Block_List worklist;
  worklist.push(LCA);
  while (worklist.size() > 0) {
    Block* mid = worklist.pop();
    if (mid == early)  continue;  // stop searching here

    // Test and set the visited bit.
    if (mid->raise_LCA_visited() == mark)  continue;  // already visited

    // Don't process the current LCA, otherwise the search may terminate early
    if (mid != LCA && mid->raise_LCA_mark() == mark) {
      // Raise the LCA.
      LCA = mid->dom_lca(LCA);
      if (LCA == early)  break;   // stop searching everywhere
      assert(early->dominates(LCA), "early is high enough");
      // Resume searching at that point, skipping intermediate levels.
      worklist.push(LCA);
      if (LCA == mid)
        continue; // Don't mark as visited to avoid early termination.
    } else {
      // Keep searching through this block's predecessors.
      for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
        Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
        worklist.push(mid_parent);
      }
    }
    mid->set_raise_LCA_visited(mark);
  }
  return LCA;
}

//--------------------------memory_early_block--------------------------------
// This is a variation of find_deepest_input, the heart of schedule_early.
// Find the "early" block for a load, if we considered only memory and
// address inputs, that is, if other data inputs were ignored.
//
// Because a subset of edges are considered, the resulting block will
// be earlier (at a shallower dom_depth) than the true schedule_early
// point of the node. We compute this earlier block as a more permissive
// site for anti-dependency insertion, but only if subsume_loads is enabled.
static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
  Node* base;
  Node* index;
  Node* store = load->in(MemNode::Memory);
  load->as_Mach()->memory_inputs(base, index);

  assert(base != NodeSentinel && index != NodeSentinel,
         "unexpected base/index inputs");

  Node* mem_inputs[4];
  int mem_inputs_length = 0;
  if (base != NULL)  mem_inputs[mem_inputs_length++] = base;
  if (index != NULL) mem_inputs[mem_inputs_length++] = index;
  if (store != NULL) mem_inputs[mem_inputs_length++] = store;

  // In the comparison below, add one to account for the control input,
  // which may be null, but always takes up a spot in the in array.
  if (mem_inputs_length + 1 < (int) load->req()) {
    // This "load" has more inputs than just the memory, base and index inputs.
    // For purposes of checking anti-dependences, we need to start
    // from the early block of only the address portion of the instruction,
    // and ignore other blocks that may have factored into the wider
    // schedule_early calculation.
    if (load->in(0) != NULL) mem_inputs[mem_inputs_length++] = load->in(0);

    Block* deepb           = NULL;        // Deepest block so far
    int    deepb_dom_depth = 0;
    for (int i = 0; i < mem_inputs_length; i++) {
      Block* inb = cfg->get_block_for_node(mem_inputs[i]);
      if (deepb_dom_depth < (int) inb->_dom_depth) {
        // The new inb must be dominated by the previous deepb.
        // The various inputs must be linearly ordered in the dom
        // tree, or else there will not be a unique deepest block.
        DEBUG_ONLY(assert_dom(deepb, inb, load, cfg));
        deepb = inb;                      // Save deepest block
        deepb_dom_depth = deepb->_dom_depth;
      }
    }
    early = deepb;
  }

  return early;
}

//--------------------------insert_anti_dependences---------------------------
// A load may need to witness memory that nearby stores can overwrite.
// For each nearby store, either insert an "anti-dependence" edge
// from the load to the store, or else move LCA upward to force the
// load to (eventually) be scheduled in a block above the store.
//
// Do not add edges to stores on distinct control-flow paths;
// only add edges to stores which might interfere.
//
// Return the (updated) LCA.  There will not be any possibly interfering
// store between the load's "early block" and the updated LCA.
// Any stores in the updated LCA will have new precedence edges
// back to the load.  The caller is expected to schedule the load
// in the LCA, in which case the precedence edges will make LCM
// preserve anti-dependences.  The caller may also hoist the load
// above the LCA, if it is not the early block.
Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
  assert(load->needs_anti_dependence_check(), "must be a load of some sort");
  assert(LCA != NULL, "");
  DEBUG_ONLY(Block* LCA_orig = LCA);

  // Compute the alias index.  Loads and stores with different alias indices
  // do not need anti-dependence edges.
  int load_alias_idx = C->get_alias_index(load->adr_type());
#ifdef ASSERT
  if (load_alias_idx == Compile::AliasIdxBot && C->AliasLevel() > 0 &&
      (PrintOpto || VerifyAliases ||
       (PrintMiscellaneous && (WizardMode || Verbose)))) {
    // Load nodes should not consume all of memory.
    // Reporting a bottom type indicates a bug in adlc.
    // If some particular type of node validly consumes all of memory,
    // sharpen the preceding "if" to exclude it, so we can catch bugs here.
    tty->print_cr("*** Possible Anti-Dependence Bug: Load consumes all of memory.");
    load->dump(2);
    if (VerifyAliases)  assert(load_alias_idx != Compile::AliasIdxBot, "");
  }
#endif
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrComp),
         "String compare is only known 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrEquals),
         "String equals is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrIndexOf),
         "String indexOf is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrIndexOfChar),
         "String indexOfChar is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_AryEq),
         "Arrays equals is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_HasNegatives),
         "HasNegatives is a 'load' that does not conflict with any stores");

  if (!C->alias_type(load_alias_idx)->is_rewritable()) {
    // It is impossible to spoil this load by putting stores before it,
    // because we know that the stores will never update the value
    // which 'load' must witness.
    return LCA;
  }

  node_idx_t load_index = load->_idx;

  // Note the earliest legal placement of 'load', as determined by
  // the unique point in the dom tree where all memory effects
  // and other inputs are first available.  (Computed by schedule_early.)
  // For normal loads, 'early' is the shallowest place (dom graph wise)
  // to look for anti-deps between this load and any store.
  Block* early = get_block_for_node(load);

  // If we are subsuming loads, compute an "early" block that only considers
  // memory or address inputs. This block may be different from the
  // schedule_early block in that it could be at an even shallower depth in the
  // dominator tree, and allow for a broader discovery of anti-dependences.
  if (C->subsume_loads()) {
    early = memory_early_block(load, early, this);
  }

  ResourceArea *area = Thread::current()->resource_area();
  Node_List worklist_mem(area);     // prior memory state to store
  Node_List worklist_store(area);   // possible-def to explore
  Node_List worklist_visited(area); // visited mergemem nodes
  Node_List non_early_stores(area); // all relevant stores outside of early
  bool must_raise_LCA = false;

#ifdef TRACK_PHI_INPUTS
  // %%% This extra checking fails because MergeMem nodes are not GVNed.
  // Provide "phi_inputs" to check if every input to a PhiNode is from the
  // original memory state.  This indicates a PhiNode which should not
  // prevent the load from sinking.  For such a block, set_raise_LCA_mark
  // may be overly conservative.
  // Mechanism: count inputs seen for each Phi encountered in worklist_store.
  DEBUG_ONLY(GrowableArray<uint> phi_inputs(area, C->unique(),0,0));
#endif

  // 'load' uses some memory state; look for users of the same state.
  // Recurse through MergeMem nodes to the stores that use them.

  // Each of these stores is a possible definition of memory
  // that 'load' needs to use.  We need to force 'load'
  // to occur before each such store.
  // When the store is in the same block as 'load', we insert an
  // anti-dependence edge load->store.

  // The relevant stores "nearby" the load consist of a tree rooted
  // at initial_mem, with internal nodes of type MergeMem.
  // Therefore, the branches visited by the worklist are of this form:
  //    initial_mem -> (MergeMem ->)* store
  // The anti-dependence constraints apply only to the fringe of this tree.

  Node* initial_mem = load->in(MemNode::Memory);
  worklist_store.push(initial_mem);
  worklist_visited.push(initial_mem);
  worklist_mem.push(NULL);
  while (worklist_store.size() > 0) {
    // Examine a nearby store to see if it might interfere with our load.
    Node* mem   = worklist_mem.pop();
    Node* store = worklist_store.pop();
    uint op = store->Opcode();

    // MergeMems do not directly have anti-deps.
    // Treat them as internal nodes in a forward tree of memory states,
    // the leaves of which are each a 'possible-def'.
    if (store == initial_mem    // root (exclusive) of tree we are searching
        || op == Op_MergeMem    // internal node of tree we are searching
        ) {
      mem = store;   // It's not a possibly interfering store.
      if (store == initial_mem)
        initial_mem = NULL;  // only process initial memory once

      for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
        store = mem->fast_out(i);
        if (store->is_MergeMem()) {
          // Be sure we don't get into combinatorial problems.
          // (Allow phis to be repeated; they can merge two relevant states.)
          uint j = worklist_visited.size();
          for (; j > 0; j--) {
            if (worklist_visited.at(j-1) == store)  break;
          }
          if (j > 0)  continue; // already on work list; do not repeat
          worklist_visited.push(store);
        }
        worklist_mem.push(mem);
        worklist_store.push(store);
      }
      continue;
    }

    if (op == Op_MachProj || op == Op_Catch)   continue;
    if (store->needs_anti_dependence_check())  continue;  // not really a store

    // Compute the alias index.  Loads and stores with different alias
    // indices do not need anti-dependence edges.  Wide MemBar's are
    // anti-dependent on everything (except immutable memories).
    const TypePtr* adr_type = store->adr_type();
    if (!C->can_alias(adr_type, load_alias_idx))  continue;

    // Most slow-path runtime calls do NOT modify Java memory, but
    // they can block and so write Raw memory.
    if (store->is_Mach()) {
      MachNode* mstore = store->as_Mach();
      if (load_alias_idx != Compile::AliasIdxRaw) {
        // Check for call into the runtime using the Java calling
        // convention (and from there into a wrapper); it has no
        // _method.  Can't do this optimization for Native calls because
        // they CAN write to Java memory.
        if (mstore->ideal_Opcode() == Op_CallStaticJava) {
          assert(mstore->is_MachSafePoint(), "");
          MachSafePointNode* ms = (MachSafePointNode*) mstore;
          assert(ms->is_MachCallJava(), "");
          MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
          if (mcj->_method == NULL) {
            // These runtime calls do not write to Java visible memory
            // (other than Raw) and so do not require anti-dependence edges.
            continue;
          }
        }
        // Same for SafePoints: they read/write Raw but only read otherwise.
        // This is basically a workaround for SafePoints only defining control
        // instead of control + memory.
        if (mstore->ideal_Opcode() == Op_SafePoint)
          continue;
      } else {
        // Some raw memory, such as the load of "top" at an allocation,
        // can be control dependent on the previous safepoint. See
        // comments in GraphKit::allocate_heap() about control input.
        // Inserting an anti-dep between such a safepoint and a use
        // creates a cycle, and will cause a subsequent failure in
        // local scheduling.  (BugId 4919904)
        // (%%% How can a control input be a safepoint and not a projection??)
        if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore)
          continue;
      }
    }

    // Identify a block that the current load must be above,
    // or else observe that 'store' is all the way up in the
    // earliest legal block for 'load'.  In the latter case,
    // immediately insert an anti-dependence edge.
    Block* store_block = get_block_for_node(store);
    assert(store_block != NULL, "unused killing projections skipped above");

    if (store->is_Phi()) {
      // 'load' uses memory which is one (or more) of the Phi's inputs.
      // It must be scheduled not before the Phi, but rather before
      // each of the relevant Phi inputs.
      //
      // Instead of finding the LCA of all inputs to a Phi that match 'mem',
      // we mark each corresponding predecessor block and do a combined
      // hoisting operation later (raise_LCA_above_marks).
      //
      // Do not assert(store_block != early, "Phi merging memory after access")
      // PhiNode may be at start of block 'early' with backedge to 'early'
      DEBUG_ONLY(bool found_match = false);
      for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
        if (store->in(j) == mem) {   // Found matching input?
          DEBUG_ONLY(found_match = true);
          Block* pred_block = get_block_for_node(store_block->pred(j));
          if (pred_block != early) {
            // If any predecessor of the Phi matches the load's "early block",
            // we do not need a precedence edge between the Phi and 'load'
            // since the load will be forced into a block preceding the Phi.
            pred_block->set_raise_LCA_mark(load_index);
            assert(!LCA_orig->dominates(pred_block) ||
                   early->dominates(pred_block), "early is high enough");
            must_raise_LCA = true;
          } else {
            // anti-dependent upon PHI pinned below 'early', no edge needed
            LCA = early;             // but can not schedule below 'early'
          }
        }
      }
      assert(found_match, "no worklist bug");
#ifdef TRACK_PHI_INPUTS
#ifdef ASSERT
      // This assert asks about correct handling of PhiNodes, which may not
      // have all input edges directly from 'mem'. See BugId 4621264
      int num_mem_inputs = phi_inputs.at_grow(store->_idx,0) + 1;
      // Increment by exactly one even if there are multiple copies of 'mem'
      // coming into the phi, because we will run this block several times
      // if there are several copies of 'mem'.  (That's how DU iterators work.)
      phi_inputs.at_put(store->_idx, num_mem_inputs);
      assert(PhiNode::Input + num_mem_inputs < store->req(),
             "Expect at least one phi input will not be from original memory state");
#endif //ASSERT
#endif //TRACK_PHI_INPUTS
    } else if (store_block != early) {
      // 'store' is between the current LCA and earliest possible block.
      // Label its block, and decide later on how to raise the LCA
      // to include the effect on LCA of this store.
      // If this store's block gets chosen as the raised LCA, we
      // will find him on the non_early_stores list and stick him
      // with a precedence edge.
      // (But, don't bother if LCA is already raised all the way.)
      if (LCA != early) {
        store_block->set_raise_LCA_mark(load_index);
        must_raise_LCA = true;
        non_early_stores.push(store);
      }
    } else {
      // Found a possibly-interfering store in the load's 'early' block.
      // This means 'load' cannot sink at all in the dominator tree.
      // Add an anti-dep edge, and squeeze 'load' into the highest block.
      assert(store != load->in(0), "dependence cycle found");
      if (verify) {
        assert(store->find_edge(load) != -1, "missing precedence edge");
      } else {
        store->add_prec(load);
      }
      LCA = early;
      // This turns off the process of gathering non_early_stores.
    }
  }
  // (Worklist is now empty; all nearby stores have been visited.)

  // Finished if 'load' must be scheduled in its 'early' block.
  // If we found any stores there, they have already been given
  // precedence edges.
  if (LCA == early)  return LCA;

  // We get here only if there are no possibly-interfering stores
  // in the load's 'early' block.  Move LCA up above all predecessors
  // which contain stores we have noted.
  //
  // The raised LCA block can be a home to such interfering stores,
  // but its predecessors must not contain any such stores.
  //
  // The raised LCA will be a lower bound for placing the load,
  // preventing the load from sinking past any block containing
  // a store that may invalidate the memory state required by 'load'.
  if (must_raise_LCA)
    LCA = raise_LCA_above_marks(LCA, load->_idx, early, this);
  if (LCA == early)  return LCA;

  // Insert anti-dependence edges from 'load' to each store
  // in the non-early LCA block.
  // Mine the non_early_stores list for such stores.
  if (LCA->raise_LCA_mark() == load_index) {
    while (non_early_stores.size() > 0) {
      Node* store = non_early_stores.pop();
      Block* store_block = get_block_for_node(store);
      if (store_block == LCA) {
        // add anti_dependence from store to load in its own block
        assert(store != load->in(0), "dependence cycle found");
        if (verify) {
          assert(store->find_edge(load) != -1, "missing precedence edge");
        } else {
          store->add_prec(load);
        }
      } else {
        assert(store_block->raise_LCA_mark() == load_index, "block was marked");
        // Any other stores we found must be either inside the new LCA
        // or else outside the original LCA.  In the latter case, they
        // did not interfere with any use of 'load'.
        assert(LCA->dominates(store_block)
               || !LCA_orig->dominates(store_block), "no stray stores");
      }
    }
  }

  // Return the highest block containing stores; any stores
  // within that block have been given anti-dependence edges.
  return LCA;
}

// This class is used to iterate backwards over the nodes in the graph.
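// Note on mechanics (added commentary): the iterator keeps its whole traversal
// state on the supplied Node_Stack.  For each pending node it stores the node
// pointer with the low bit used as a pass flag (the first pass visits users
// that do not need an anti-dependence check, the second pass visits those that
// do) together with the index of the next out-edge to examine, so next()
// performs a post-order walk over the uses without native recursion.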
class Node_Backward_Iterator {

private:
  Node_Backward_Iterator();

public:
  // Constructor for the iterator
  Node_Backward_Iterator(Node *root, VectorSet &visited, Node_Stack &stack, PhaseCFG &cfg);

  // Return the next node in the backward traversal, or NULL when done
  Node *next();

private:
  VectorSet  &_visited;
  Node_Stack &_stack;
  PhaseCFG   &_cfg;
};

// Constructor for the Node_Backward_Iterator
Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_Stack &stack, PhaseCFG &cfg)
  : _visited(visited), _stack(stack), _cfg(cfg) {
  // The stack should contain exactly the root
  stack.clear();
  stack.push(root, root->outcnt());

  // Clear the visited bits
  visited.Clear();
}

// Iterator for the Node_Backward_Iterator
Node *Node_Backward_Iterator::next() {

  // If the _stack is empty, then just return NULL: finished.
  if ( !_stack.size() )
    return NULL;

  // I visit unvisited not-anti-dependence users first, then anti-dependent
  // children next. I iterate backwards to support removal of nodes.
  // The stack holds states consisting of 3 values:
  // current Def node, flag which indicates 1st/2nd pass, index of current out edge
  Node *self = (Node*)(((uintptr_t)_stack.node()) & ~1);
  bool iterate_anti_dep = (((uintptr_t)_stack.node()) & 1);
  uint idx = MIN2(_stack.index(), self->outcnt()); // Support removal of nodes.
  _stack.pop();

  // I cycle here when I am entering a deeper level of recursion.
  // The key variable 'self' was set prior to jumping here.
  while( 1 ) {

    _visited.set(self->_idx);

    // Now schedule all uses as late as possible.
    const Node* src = self->is_Proj() ? self->in(0) : self;
    uint src_rpo = _cfg.get_block_for_node(src)->_rpo;

    // Schedule all nodes in a post-order visit
    Node *unvisited = NULL;  // Unvisited anti-dependent Node, if any

    // Scan for unvisited nodes
    while (idx > 0) {
      // For all uses, schedule late
      Node* n = self->raw_out(--idx); // Use

      // Skip already visited children
      if ( _visited.test(n->_idx) )
        continue;

      // do not traverse backward control edges
      Node *use = n->is_Proj() ? n->in(0) : n;
      uint use_rpo = _cfg.get_block_for_node(use)->_rpo;

      if ( use_rpo < src_rpo )
        continue;

      // Phi nodes always precede uses in a basic block
      if ( use_rpo == src_rpo && use->is_Phi() )
        continue;

      unvisited = n;      // Found unvisited

      // Check for possible-anti-dependent
      // 1st pass: No such nodes, 2nd pass: Only such nodes.
      if (n->needs_anti_dependence_check() == iterate_anti_dep) {
        unvisited = n;    // Found unvisited
        break;
      }
    }

    // Did I find an unvisited not-anti-dependent Node?
    if (!unvisited) {
      if (!iterate_anti_dep) {
        // 2nd pass: Iterate over nodes which needs_anti_dependence_check.
        iterate_anti_dep = true;
        idx = self->outcnt();
        continue;
      }
      break;  // All done with children; post-visit 'self'
    }

    // Visit the unvisited Node.  Contains the obvious push to
    // indicate I'm entering a deeper level of recursion.  I push the
    // old state onto the _stack and set a new state and loop (recurse).
    _stack.push((Node*)((uintptr_t)self | (uintptr_t)iterate_anti_dep), idx);
    self = unvisited;
    iterate_anti_dep = false;
    idx = self->outcnt();
  } // End recursion loop

  return self;
}

//------------------------------ComputeLatenciesBackwards----------------------
// Compute the latency of all the instructions.
void PhaseCFG::compute_latencies_backwards(VectorSet &visited, Node_Stack &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- ComputeLatenciesBackwards ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
  Node *n;

  // Walk over all the nodes from last to first
  while ((n = iter.next())) {
    // Set the latency for the definitions of this instruction
    partial_latency_of_defs(n);
  }
} // end ComputeLatenciesBackwards

//------------------------------partial_latency_of_defs------------------------
// Compute the latency impact of this node on all defs.  This computes
// a number that increases as we approach the beginning of the routine.
void PhaseCFG::partial_latency_of_defs(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_to_inputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
    dump();
  }
#endif

  if (n->is_Proj()) {
    n = n->in(0);
  }

  if (n->is_Root()) {
    return;
  }

  uint nlen = n->len();
  uint use_latency = get_latency_for_node(n);
  uint use_pre_order = get_block_for_node(n)->_pre_order;

  for (uint j = 0; j < nlen; j++) {
    Node *def = n->in(j);

    if (!def || def == n) {
      continue;
    }

    // Walk backwards thru projections
    if (def->is_Proj()) {
      def = def->in(0);
    }

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("# in(%2d): ", j);
      def->dump();
    }
#endif

    // If the defining block is not known, assume it is ok
    Block *def_block = get_block_for_node(def);
    uint def_pre_order = def_block ? def_block->_pre_order : 0;

    if ((use_pre_order < def_pre_order) || (use_pre_order == def_pre_order && n->is_Phi())) {
      continue;
    }

    uint delta_latency = n->latency(j);
    uint current_latency = delta_latency + use_latency;

    if (get_latency_for_node(def) < current_latency) {
      set_latency_for_node(def, current_latency);
    }

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", use_latency, j, delta_latency, current_latency, def->_idx, get_latency_for_node(def));
    }
#endif
  }
}

//------------------------------latency_from_use-------------------------------
// Compute the latency of a specific use
int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
  // If self-reference, return no latency
  if (use == n || use->is_Root()) {
    return 0;
  }

  uint def_pre_order = get_block_for_node(def)->_pre_order;
  uint latency = 0;

  // If the use is not a projection, then it is simple...
  if (!use->is_Proj()) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("# out(): ");
      use->dump();
    }
#endif

    uint use_pre_order = get_block_for_node(use)->_pre_order;

    if (use_pre_order < def_pre_order)
      return 0;

    if (use_pre_order == def_pre_order && use->is_Phi())
      return 0;

    uint nlen = use->len();
    uint nl = get_latency_for_node(use);

    for ( uint j=0; j<nlen; j++ ) {
      if (use->in(j) == n) {
        // Change this if we want local latencies
        uint ul = use->latency(j);
        uint l = ul + nl;
        if (latency < l) latency = l;
#ifndef PRODUCT
        if (trace_opto_pipelining()) {
          tty->print_cr("# %d + edge_latency(%d) == %d -> %d, latency = %d",
                        nl, j, ul, l, latency);
        }
#endif
      }
    }
  } else {
    // This is a projection, just grab the latency of the use(s)
    for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
      uint l = latency_from_use(use, def, use->fast_out(j));
      if (latency < l) latency = l;
    }
  }

  return latency;
}

//------------------------------latency_from_uses------------------------------
// Compute the latency of this instruction relative to all of its uses.
// This computes a number that increases as we approach the beginning of the
// routine.
void PhaseCFG::latency_from_uses(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_from_outputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
    dump();
  }
#endif
  uint latency=0;
  const Node *def = n->is_Proj() ? n->in(0): n;

  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    uint l = latency_from_use(n, def, n->fast_out(i));

    if (latency < l) latency = l;
  }

  set_latency_for_node(n, latency);
}

//------------------------------hoist_to_cheaper_block-------------------------
// Pick a block for node self, between early and LCA, that is a cheaper
// alternative to LCA.
Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
  const double delta = 1+PROB_UNLIKELY_MAG(4);
  Block* least       = LCA;
  double least_freq  = least->_freq;
  uint target        = get_latency_for_node(self);
  uint start_latency = get_latency_for_node(LCA->head());
  uint end_latency   = get_latency_for_node(LCA->get_node(LCA->end_idx()));
  bool in_latency    = (target <= start_latency);
  const Block* root_block = get_block_for_node(_root);

  // Turn off latency scheduling if scheduling is just plain off
  if (!C->do_scheduling())
    in_latency = true;

  // Do not hoist (to cover latency) instructions which target a
  // single register.  Hoisting stretches the live range of the
  // single register and may force spilling.
  MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
  if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty())
    in_latency = true;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# Find cheaper block for latency %d: ", get_latency_for_node(self));
    self->dump();
    tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
                  LCA->_pre_order,
                  LCA->head()->_idx,
                  start_latency,
                  LCA->get_node(LCA->end_idx())->_idx,
                  end_latency,
                  least_freq);
  }
#endif

  int cand_cnt = 0;  // number of candidates tried

  // Walk up the dominator tree from LCA (Lowest common ancestor) to
  // the earliest legal location.  Capture the least execution frequency.
  while (LCA != early) {
    LCA = LCA->_idom;         // Follow up the dominator tree

    if (LCA == NULL) {
      // Bailout without retry
      assert(false, "graph should be schedulable");
      C->record_method_not_compilable("late schedule failed: LCA == NULL");
      return least;
    }

    // Don't hoist machine instructions to the root basic block
    if (mach && LCA == root_block)
      break;

    uint start_lat = get_latency_for_node(LCA->head());
    uint end_idx   = LCA->end_idx();
    uint end_lat   = get_latency_for_node(LCA->get_node(end_idx));
    double LCA_freq = LCA->_freq;
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
                    LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq);
    }
#endif
    cand_cnt++;
    if (LCA_freq < least_freq                             || // Better Frequency
        (StressGCM && Compile::randomized_select(cand_cnt)) || // Should be randomly accepted in stress mode
        (!StressGCM                                       && // Otherwise, choose with latency
         !in_latency                                      && // No block containing latency
         LCA_freq < least_freq * delta                    && // No worse frequency
         target >= end_lat                                && // within latency range
         !self->is_iteratively_computed() )                  // But don't hoist IV increments
            // because they may end up above other uses of their phi forcing
            // their result register to be different from their input.
       ) {
      least = LCA;            // Found cheaper block
      least_freq = LCA_freq;
      start_latency = start_lat;
      end_latency = end_lat;
      if (target <= start_lat)
        in_latency = true;
    }
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print_cr("# Choose block B%d with start latency=%d and freq=%g",
                  least->_pre_order, start_latency, least_freq);
  }
#endif

  // See if the latency needs to be updated
  if (target < end_latency) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
    }
#endif
    set_latency_for_node(self, end_latency);
    partial_latency_of_defs(self);
  }

  return least;
}


//------------------------------schedule_late-----------------------------------
// Now schedule all codes as LATE as possible.  This is the LCA in the
// dominator tree of all USES of a value.  Pick the block with the least
// loop nesting depth that is lowest in the dominator tree.
extern const char must_clone[];
void PhaseCFG::schedule_late(VectorSet &visited, Node_Stack &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- schedule_late ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
  Node *self;

  // Walk over all the nodes from last to first
  while ((self = iter.next())) {
    Block* early = get_block_for_node(self); // Earliest legal placement

    if (self->is_top()) {
      // Top node goes in bb #2 with other constants.
      // It must be special-cased, because it has no out edges.
      early->add_inst(self);
      continue;
    }

    // No uses, just terminate
    if (self->outcnt() == 0) {
      assert(self->is_MachProj(), "sanity");
      continue;                   // Must be a dead machine projection
    }

    // If node is pinned in the block, then no scheduling can be done.
    if( self->pinned() )          // Pinned in block?
      continue;

    MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
    if (mach) {
      switch (mach->ideal_Opcode()) {
      case Op_CreateEx:
        // Don't move exception creation
        early->add_inst(self);
        continue;
        break;
      case Op_CheckCastPP: {
        // Don't move CheckCastPP nodes away from their input, if the input
        // is a rawptr (5071820).
        Node *def = self->in(1);
        if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
          early->add_inst(self);
#ifdef ASSERT
          _raw_oops.push(def);
#endif
          continue;
        }
        break;
      }
      default:
        break;
      }
    }

    // Gather LCA of all uses
    Block *LCA = NULL;
    {
      for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
        // For all uses, find LCA
        Node* use = self->fast_out(i);
        LCA = raise_LCA_above_use(LCA, use, self, this);
      }
    }  // (Hide defs of imax, i from rest of block.)

    // Place temps in the block of their use.  This isn't a
    // requirement for correctness but it reduces useless
    // interference between temps and other nodes.
    if (mach != NULL && mach->is_MachTemp()) {
      map_node_to_block(self, LCA);
      LCA->add_inst(self);
      continue;
    }

    // Check if 'self' could be anti-dependent on memory
    if (self->needs_anti_dependence_check()) {
      // Hoist LCA above possible-defs and insert anti-dependences to
      // defs in new LCA block.
      LCA = insert_anti_dependences(LCA, self);
    }

    if (early->_dom_depth > LCA->_dom_depth) {
      // Somehow the LCA has moved above the earliest legal point.
      // (One way this can happen is via memory_early_block.)
      if (C->subsume_loads() == true && !C->failing()) {
        // Retry with subsume_loads == false
        // If this is the first failure, the sentinel string will "stick"
        // to the Compile object, and the C2Compiler will see it and retry.
        C->record_failure(C2Compiler::retry_no_subsuming_loads());
      } else {
        // Bailout without retry when (early->_dom_depth > LCA->_dom_depth)
        assert(false, "graph should be schedulable");
        C->record_method_not_compilable("late schedule failed: incorrect graph");
      }
      return;
    }

    // If there is no opportunity to hoist, then we're done.
    // In stress mode, try to hoist even the single operations.
    bool try_to_hoist = StressGCM || (LCA != early);

    // Must clone guys stay next to use; no hoisting allowed.
    // Also cannot hoist guys that alter memory or are otherwise not
    // allocatable (hoisting can make a value live longer, leading to
    // anti and output dependency problems which are normally resolved
    // by the register allocator giving everyone a different register).
    if (mach != NULL && must_clone[mach->ideal_Opcode()])
      try_to_hoist = false;

    Block* late = NULL;
    if (try_to_hoist) {
      // Now find the block with the least execution frequency.
      // Start at the latest schedule and work up to the earliest schedule
      // in the dominator tree.  Thus the Node will dominate all its uses.
      late = hoist_to_cheaper_block(LCA, early, self);
    } else {
      // Just use the LCA of the uses.
      late = LCA;
    }

    // Put the node into target block
    schedule_node_into_block(self, late);

#ifdef ASSERT
    if (self->needs_anti_dependence_check()) {
      // since precedence edges are only inserted when we're sure they
      // are needed make sure that after placement in a block we don't
      // need any new precedence edges.
      verify_anti_dependences(late, self);
    }
#endif
  } // Loop until all nodes have been visited

} // end ScheduleLate

//------------------------------GlobalCodeMotion-------------------------------
void PhaseCFG::global_code_motion() {
  ResourceMark rm;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Start GlobalCodeMotion ----\n");
  }
#endif

  // Initialize the node to block mapping for things on the proj_list
  for (uint i = 0; i < _matcher.number_of_projections(); i++) {
    unmap_node_from_block(_matcher.get_projection(i));
  }

  // Set the basic block for Nodes pinned into blocks
  Arena* arena = Thread::current()->resource_area();
  VectorSet visited(arena);
  schedule_pinned_nodes(visited);

  // Find the earliest Block any instruction can be placed in.  Some
  // instructions are pinned into Blocks.  Unpinned instructions can
  // appear in the last block in which all their inputs occur.
  visited.Clear();
  Node_Stack stack(arena, (C->live_nodes() >> 2) + 16); // pre-grow
  if (!schedule_early(visited, stack)) {
    // Bailout without retry
    C->record_method_not_compilable("early schedule failed");
    return;
  }

  // Build Def-Use edges.
  // Compute the latency information (via backwards walk) for all the
  // instructions in the graph
  _node_latency = new GrowableArray<uint>(); // resource_area allocation

  if (C->do_scheduling()) {
    compute_latencies_backwards(visited, stack);
  }

  // Now schedule all codes as LATE as possible.  This is the LCA in the
  // dominator tree of all USES of a value.  Pick the block with the least
  // loop nesting depth that is lowest in the dominator tree.
  // ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() )
  schedule_late(visited, stack);
  if (C->failing()) {
    // schedule_late fails only when graph is incorrect.
    assert(!VerifyGraphEdges, "verification should have failed");
    return;
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Detect implicit null checks ----\n");
  }
#endif

  // Detect implicit-null-check opportunities.  Basically, find NULL checks
  // with suitable memory ops nearby.  Use the memory op to do the NULL check.
  // I can generate a memory op if there is not one nearby.
  if (C->is_method_compilation()) {
    // By reversing the loop direction we get a very minor gain on mpegaudio.
    // Feel free to revert to a forward loop for clarity.
    // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
    for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) {
      Node* proj = _matcher._null_check_tests[i];
      Node* val  = _matcher._null_check_tests[i + 1];
      Block* block = get_block_for_node(proj);
      implicit_null_check(block, proj, val, C->allowed_deopt_reasons());
      // The implicit_null_check will only perform the transformation
      // if the null branch is truly uncommon, *and* it leads to an
      // uncommon trap.  Combined with the too_many_traps guards
      // above, this prevents SEGV storms reported in 6366351,
      // by recompiling offending methods without this optimization.
    }
  }

  bool block_size_threshold_ok = false;
  intptr_t *recalc_pressure_nodes = NULL;
  if (OptoRegScheduling) {
    for (uint i = 0; i < number_of_blocks(); i++) {
      Block* block = get_block(i);
      if (block->number_of_nodes() > 10) {
        block_size_threshold_ok = true;
        break;
      }
    }
  }

  // Enabling the scheduler for register pressure plus finding blocks of size to schedule for it
  // is key to enabling this feature.
  PhaseChaitin regalloc(C->unique(), *this, _matcher, true);
  ResourceArea live_arena(mtCompiler);      // Arena for liveness
  ResourceMark rm_live(&live_arena);
  PhaseLive live(*this, regalloc._lrg_map.names(), &live_arena, true);
  PhaseIFG ifg(&live_arena);
  if (OptoRegScheduling && block_size_threshold_ok) {
    regalloc.mark_ssa();
    Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
    rm_live.reset_to_mark();            // Reclaim working storage
    IndexSet::reset_memory(C, &live_arena);
    uint node_size = regalloc._lrg_map.max_lrg_id();
    ifg.init(node_size);                // Empty IFG
    regalloc.set_ifg(ifg);
    regalloc.set_live(live);
    regalloc.gather_lrg_masks(false);   // Collect LRG masks
    live.compute(node_size);            // Compute liveness

    recalc_pressure_nodes = NEW_RESOURCE_ARRAY(intptr_t, node_size);
    for (uint i = 0; i < node_size; i++) {
      recalc_pressure_nodes[i] = 0;
    }
  }
  _regalloc = &regalloc;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Start Local Scheduling ----\n");
  }
#endif

  // Schedule locally.  Right now a simple topological sort.
  // Later, do a real latency aware scheduler.
  GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1);
  visited.Clear();
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    if (!schedule_local(block, ready_cnt, visited, recalc_pressure_nodes)) {
      if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
        C->record_method_not_compilable("local schedule failed");
      }
      _regalloc = NULL;
      return;
    }
  }
  _regalloc = NULL;

  // If we inserted any instructions between a Call and its CatchNode,
  // clone the instructions on all paths below the Catch.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    call_catch_cleanup(block);
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- After GlobalCodeMotion ----\n");
    for (uint i = 0; i < number_of_blocks(); i++) {
      Block* block = get_block(i);
      block->dump();
    }
  }
#endif
  // Dead.
  _node_latency = (GrowableArray<uint> *)0xdeadbeef;
}

bool PhaseCFG::do_global_code_motion() {

  build_dominator_tree();
  if (C->failing()) {
    return false;
  }

  NOT_PRODUCT( C->verify_graph_edges(); )

  estimate_block_frequency();

  global_code_motion();

  if (C->failing()) {
    return false;
  }

  return true;
}

//------------------------------Estimate_Block_Frequency-----------------------
// Estimate block frequencies based on IfNode probabilities.
void PhaseCFG::estimate_block_frequency() {

  // Force conditional branches leading to uncommon traps to be unlikely,
  // not because we get to the uncommon_trap with less relative frequency,
  // but because an uncommon_trap typically causes a deopt, so we only get
  // there once.
  if (C->do_freq_based_layout()) {
    Block_List worklist;
    Block* root_blk = get_block(0);
    for (uint i = 1; i < root_blk->num_preds(); i++) {
      Block *pb = get_block_for_node(root_blk->pred(i));
      if (pb->has_uncommon_code()) {
        worklist.push(pb);
      }
    }
    while (worklist.size() > 0) {
      Block* uct = worklist.pop();
      if (uct == get_root_block()) {
        continue;
      }
      for (uint i = 1; i < uct->num_preds(); i++) {
        Block *pb = get_block_for_node(uct->pred(i));
        if (pb->_num_succs == 1) {
          worklist.push(pb);
        } else if (pb->num_fall_throughs() == 2) {
          pb->update_uncommon_branch(uct);
        }
      }
    }
  }

  // Create the loop tree and calculate loop depth.
  _root_loop = create_loop_tree();
  _root_loop->compute_loop_depth(0);

  // Compute block frequency of each block, relative to a single loop entry.
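  // (Hedged numeric illustration of the two phases that follow: compute_freq()
  // first gives a loop-body block a frequency of, say, 1.0 relative to its own
  // loop head; if the loop's estimated trip count is 10 and the loop is
  // entered once per method invocation, scale_freq() then turns that into an
  // absolute frequency of roughly 10 relative to the method entry.)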
  _root_loop->compute_freq();

  // Adjust all frequencies to be relative to a single method entry
  _root_loop->_freq = 1.0;
  _root_loop->scale_freq();

  // Save the outermost loop frequency for the LRG frequency threshold
  _outer_loop_frequency = _root_loop->outer_loop_freq();

  // Force paths ending at uncommon traps to be infrequent
  if (!C->do_freq_based_layout()) {
    Block_List worklist;
    Block* root_blk = get_block(0);
    for (uint i = 1; i < root_blk->num_preds(); i++) {
      Block *pb = get_block_for_node(root_blk->pred(i));
      if (pb->has_uncommon_code()) {
        worklist.push(pb);
      }
    }
    while (worklist.size() > 0) {
      Block* uct = worklist.pop();
      uct->_freq = PROB_MIN;
      for (uint i = 1; i < uct->num_preds(); i++) {
        Block *pb = get_block_for_node(uct->pred(i));
        if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
          worklist.push(pb);
        }
      }
    }
  }

#ifdef ASSERT
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* b = get_block(i);
    assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency");
  }
#endif

#ifndef PRODUCT
  if (PrintCFGBlockFreq) {
    tty->print_cr("CFG Block Frequencies");
    _root_loop->dump_tree();
    if (Verbose) {
      tty->print_cr("PhaseCFG dump");
      dump();
      tty->print_cr("Node dump");
      _root->dump(99999);
    }
  }
#endif
}

//----------------------------create_loop_tree--------------------------------
// Create a loop tree from the CFG
CFGLoop* PhaseCFG::create_loop_tree() {

#ifdef ASSERT
  assert(get_block(0) == get_root_block(), "first block should be root block");
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    // Check that the _loop fields are clear...we could clear them if not.
    assert(block->_loop == NULL, "clear _loop expected");
    // Sanity check that the RPO numbering is reflected in the _blocks array.
    // It doesn't have to be for the loop tree to be built, but if it is not,
    // then the blocks have been reordered since dom graph building...which
    // may call the RPO numbering into question.
    assert(block->_rpo == i, "unexpected reverse post order number");
  }
#endif

  int idct = 0;
  CFGLoop* root_loop = new CFGLoop(idct++);

  Block_List worklist;

  // Assign blocks to loops
  for (uint i = number_of_blocks() - 1; i > 0; i--) { // skip Root block
    Block* block = get_block(i);

    if (block->head()->is_Loop()) {
      Block* loop_head = block;
      assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
      Block* tail = get_block_for_node(tail_n);

      // Defensively filter out Loop nodes for non-single-entry loops.
      // For all reasonable loops, the head occurs before the tail in RPO.
      if (i <= tail->_rpo) {

        // The tail and (recursive) predecessors of the tail
        // are made members of a new loop.
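        // (Hedged illustration: for a simple loop B2 -> B3 -> B2 entered from
        // B1, the head B2 is marked first, then the walk from the back-edge
        // predecessor B3 pulls in B3 and any blocks between B2 and B3, while
        // B1 is skipped because its RPO number precedes the head's.)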
        assert(worklist.size() == 0, "nonempty worklist");
        CFGLoop* nloop = new CFGLoop(idct++);
        assert(loop_head->_loop == NULL, "just checking");
        loop_head->_loop = nloop;
        // Add to nloop so push_pred() will skip over inner loops
        nloop->add_member(loop_head);
        nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this);

        while (worklist.size() > 0) {
          Block* member = worklist.pop();
          if (member != loop_head) {
            for (uint j = 1; j < member->num_preds(); j++) {
              nloop->push_pred(member, j, worklist, this);
            }
          }
        }
      }
    }
  }

  // Create a member list for each loop consisting
  // of both blocks and (immediate child) loops.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    CFGLoop* lp = block->_loop;
    if (lp == NULL) {
      // Not assigned to a loop.  Add it to the method's pseudo loop.
      block->_loop = root_loop;
      lp = root_loop;
    }
    if (lp == root_loop || block != lp->head()) { // loop heads are already members
      lp->add_member(block);
    }
    if (lp != root_loop) {
      if (lp->parent() == NULL) {
        // Not a nested loop.  Make it a child of the method's pseudo loop.
        root_loop->add_nested_loop(lp);
      }
      if (block == lp->head()) {
        // Add nested loop to member list of parent loop.
        lp->parent()->add_member(lp);
      }
    }
  }

  return root_loop;
}

//------------------------------push_pred--------------------------------------
void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) {
  Node* pred_n = blk->pred(i);
  Block* pred = cfg->get_block_for_node(pred_n);
  CFGLoop *pred_loop = pred->_loop;
  if (pred_loop == NULL) {
    // Filter out blocks for non-single-entry loops.
    // For all reasonable loops, the head occurs before the tail in RPO.
    if (pred->_rpo > head()->_rpo) {
      pred->_loop = this;
      worklist.push(pred);
    }
  } else if (pred_loop != this) {
    // Nested loop.
    while (pred_loop->_parent != NULL && pred_loop->_parent != this) {
      pred_loop = pred_loop->_parent;
    }
    // Make pred's loop be a child
    if (pred_loop->_parent == NULL) {
      add_nested_loop(pred_loop);
      // Continue with loop entry predecessor.
      Block* pred_head = pred_loop->head();
      assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      assert(pred_head != head(), "loop head in only one loop");
      push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
    } else {
      assert(pred_loop->_parent == this && _parent == NULL, "just checking");
    }
  }
}

//------------------------------add_nested_loop--------------------------------
// Make cl a child of the current loop in the loop tree.
void CFGLoop::add_nested_loop(CFGLoop* cl) {
  assert(_parent == NULL, "no parent yet");
  assert(cl != this, "not my own parent");
  cl->_parent = this;
  CFGLoop* ch = _child;
  if (ch == NULL) {
    _child = cl;
  } else {
    while (ch->_sibling != NULL) { ch = ch->_sibling; }
    ch->_sibling = cl;
  }
}

//------------------------------compute_loop_depth-----------------------------
// Store the loop depth in each CFGLoop object.
// Recursively walk the children to do the same for them.
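// (Hedged example: the method's pseudo loop ends up with depth 0, each
// top-level loop with depth 1, a loop nested inside one of those with
// depth 2, and so on down the loop tree.)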
void CFGLoop::compute_loop_depth(int depth) {
  _depth = depth;
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->compute_loop_depth(depth + 1);
    ch = ch->_sibling;
  }
}

//------------------------------compute_freq-----------------------------------
// Compute the frequency of each block and loop, relative to a single entry
// into the dominating loop head.
void CFGLoop::compute_freq() {
  // Bottom up traversal of loop tree (visit inner loops first.)
  // Set loop head frequency to 1.0, then transitively
  // compute frequency for all successors in the loop,
  // as well as for each exit edge.  Inner loops are
  // treated as single blocks with loop exit targets
  // as the successor blocks.

  // Nested loops first
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->compute_freq();
    ch = ch->_sibling;
  }
  assert (_members.length() > 0, "no empty loops");
  Block* hd = head();
  hd->_freq = 1.0;
  for (int i = 0; i < _members.length(); i++) {
    CFGElement* s = _members.at(i);
    double freq = s->_freq;
    if (s->is_block()) {
      Block* b = s->as_Block();
      for (uint j = 0; j < b->_num_succs; j++) {
        Block* sb = b->_succs[j];
        update_succ_freq(sb, freq * b->succ_prob(j));
      }
    } else {
      CFGLoop* lp = s->as_CFGLoop();
      assert(lp->_parent == this, "immediate child");
      for (int k = 0; k < lp->_exits.length(); k++) {
        Block* eb = lp->_exits.at(k).get_target();
        double prob = lp->_exits.at(k).get_prob();
        update_succ_freq(eb, freq * prob);
      }
    }
  }

  // For all loops other than the outer, "method" loop,
  // sum and normalize the exit probability.  The "method" loop
  // should keep the initial exit probability of 1, so that
  // inner blocks do not get erroneously scaled.
  if (_depth != 0) {
    // Total the exit probabilities for this loop.
    double exits_sum = 0.0f;
    for (int i = 0; i < _exits.length(); i++) {
      exits_sum += _exits.at(i).get_prob();
    }

    // Normalize the exit probabilities.  Until now, the
    // probabilities estimate the possibility of exit per
    // single loop iteration; afterward, they estimate
    // the probability of exit per loop entry.
    for (int i = 0; i < _exits.length(); i++) {
      Block* et = _exits.at(i).get_target();
      float new_prob = 0.0f;
      if (_exits.at(i).get_prob() > 0.0f) {
        new_prob = _exits.at(i).get_prob() / exits_sum;
      }
      BlockProbPair bpp(et, new_prob);
      _exits.at_put(i, bpp);
    }

    // Save the total, but guard against unreasonable probability,
    // as the value is used to estimate the loop trip count.
    // An infinite trip count would blur relative block
    // frequencies.
    if (exits_sum > 1.0f) exits_sum = 1.0;
    if (exits_sum < PROB_MIN) exits_sum = PROB_MIN;
    _exit_prob = exits_sum;
  }
}

//------------------------------succ_prob-------------------------------------
// Determine the probability of reaching successor 'i' from the receiver block.
float Block::succ_prob(uint i) {
  int eidx = end_idx();
  Node *n = get_node(eidx);     // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // Can only reach here if called after lcm. The original Op_If is gone,
      // so we attempt to infer the probability from one or both of the
      // successor blocks.
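      // (Hedged numeric example: with this block's _freq at 1.0, if one
      // successor has this block as its only predecessor and a _freq of 0.99,
      // its probability is inferred as 0.99/1.0 and the other side gets 0.01;
      // if neither successor has a unique predecessor, the two successor
      // frequencies are simply renormalized against each other.)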
      assert(_num_succs == 2, "expecting 2 successors of a null check");
      // If either successor has only one predecessor, then the
      // probability estimate can be derived using the
      // relative frequency of the successor and this block.
      if (_succs[i]->num_preds() == 2) {
        return _succs[i]->_freq / _freq;
      } else if (_succs[1-i]->num_preds() == 2) {
        return 1 - (_succs[1-i]->_freq / _freq);
      } else {
        // Estimate using both successor frequencies
        float freq = _succs[i]->_freq;
        return freq / (freq + _succs[1-i]->_freq);
      }
    }
    op = n->as_Mach()->ideal_Opcode();
  }


  // Switch on branch type
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If: {
    assert (i < 2, "just checking");
    // Conditionals pass on only part of their frequency
    float prob = n->as_MachIf()->_prob;
    assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
    // If succ[i] is the FALSE branch, invert path info
    if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) {
      return 1.0f - prob; // not taken
    } else {
      return prob; // taken
    }
  }

  case Op_Jump:
    // Divide the frequency between all successors evenly
    return 1.0f/_num_succs;

  case Op_Catch: {
    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
    if (ci->_con == CatchProjNode::fall_through_index) {
      // Fall-thru path gets the lion's share.
      return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
    } else {
      // Presume exceptional paths are equally unlikely
      return PROB_UNLIKELY_MAG(5);
    }
  }

  case Op_Root:
  case Op_Goto:
    // Pass frequency straight thru to target
    return 1.0f;

  case Op_NeverBranch:
    return 0.0f;

  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    // Do not push out freq to root block
    return 0.0f;

  default:
    ShouldNotReachHere();
  }

  return 0.0f;
}

//------------------------------num_fall_throughs-----------------------------
// Return the number of fall-through candidates for a block
int Block::num_fall_throughs() {
  int eidx = end_idx();
  Node *n = get_node(eidx);     // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // In theory, either side can fall through; for simplicity's sake,
      // let's say only the false branch can for now.
      return 1;
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If:
    return 2;

  case Op_Root:
  case Op_Goto:
    return 1;

  case Op_Catch: {
    for (uint i = 0; i < _num_succs; i++) {
      const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
      if (ci->_con == CatchProjNode::fall_through_index) {
        return 1;
      }
    }
    return 0;
  }

  case Op_Jump:
  case Op_NeverBranch:
  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    return 0;

  default:
    ShouldNotReachHere();
  }

  return 0;
}

//------------------------------succ_fall_through-----------------------------
// Return true if a specific successor could be a fall-through target.
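// (Hedged example: for a block ending in a Catch, only the CatchProj whose
// _con equals fall_through_index can fall through to the next block in the
// layout; every exception-handler projection has to reach its target with an
// explicit branch.)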
bool Block::succ_fall_through(uint i) {
  int eidx = end_idx();
  Node *n = get_node(eidx);     // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // In theory, either side can fall through; for simplicity's sake,
      // let's say only the false branch can for now.
      return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If:
  case Op_Root:
  case Op_Goto:
    return true;

  case Op_Catch: {
    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
    return ci->_con == CatchProjNode::fall_through_index;
  }

  case Op_Jump:
  case Op_NeverBranch:
  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    return false;

  default:
    ShouldNotReachHere();
  }

  return false;
}

//------------------------------update_uncommon_branch------------------------
// Update the probability of a two-branch to be uncommon
void Block::update_uncommon_branch(Block* ub) {
  int eidx = end_idx();
  Node *n = get_node(eidx);     // Get ending Node

  int op = n->as_Mach()->ideal_Opcode();

  assert(op == Op_CountedLoopEnd || op == Op_If, "must be an If");
  assert(num_fall_throughs() == 2, "must be a two way branch block");

  // Which successor is ub?
  uint s;
  for (s = 0; s < _num_succs; s++) {
    if (_succs[s] == ub) break;
  }
  assert(s < 2, "uncommon successor must be found");

  // If ub is on the true path, make the probability small; otherwise
  // ub is on the false path, so make the probability large.
  bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);

  // Get existing probability
  float p = n->as_MachIf()->_prob;

  if (invert) p = 1.0 - p;
  if (p > PROB_MIN) {
    p = PROB_MIN;
  }
  if (invert) p = 1.0 - p;

  n->as_MachIf()->_prob = p;
}

//------------------------------update_succ_freq-------------------------------
// Update the appropriate frequency associated with block 'b', a successor of
// a block in this loop.
void CFGLoop::update_succ_freq(Block* b, double freq) {
  if (b->_loop == this) {
    if (b == head()) {
      // Back branch within the loop.
      // Do nothing now; the loop-carried frequency will be
      // adjusted later in scale_freq().
    } else {
      // Simple branch within the loop
      b->_freq += freq;
    }
  } else if (!in_loop_nest(b)) {
    // Branch is an exit from this loop
    BlockProbPair bpp(b, freq);
    _exits.append(bpp);
  } else {
    // Branch into nested loop
    CFGLoop* ch = b->_loop;
    ch->_freq += freq;
  }
}

//------------------------------in_loop_nest-----------------------------------
// Determine if block b is in the receiver's loop nest.
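// (Hedged example: if b sits in a loop nested two levels below the receiver,
// b's loop is walked up through its parents until its depth matches the
// receiver's depth; b is in the nest exactly when that walk ends at the
// receiver.)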
bool CFGLoop::in_loop_nest(Block* b) {
  int depth = _depth;
  CFGLoop* b_loop = b->_loop;
  int b_depth = b_loop->_depth;
  if (depth == b_depth) {
    return true;
  }
  while (b_depth > depth) {
    b_loop = b_loop->_parent;
    b_depth = b_loop->_depth;
  }
  return b_loop == this;
}

//------------------------------scale_freq-------------------------------------
// Scale frequency of loops and blocks by trip counts from outer loops.
// Do a top-down traversal of the loop tree (visit outer loops first).
void CFGLoop::scale_freq() {
  double loop_freq = _freq * trip_count();
  _freq = loop_freq;
  for (int i = 0; i < _members.length(); i++) {
    CFGElement* s = _members.at(i);
    double block_freq = s->_freq * loop_freq;
    if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
      block_freq = MIN_BLOCK_FREQUENCY;
    s->_freq = block_freq;
  }
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->scale_freq();
    ch = ch->_sibling;
  }
}

// Frequency of the outermost loop
double CFGLoop::outer_loop_freq() const {
  if (_child != NULL) {
    return _child->_freq;
  }
  return _freq;
}

#ifndef PRODUCT
//------------------------------dump_tree--------------------------------------
void CFGLoop::dump_tree() const {
  dump();
  if (_child != NULL)   _child->dump_tree();
  if (_sibling != NULL) _sibling->dump_tree();
}

//------------------------------dump-------------------------------------------
void CFGLoop::dump() const {
  for (int i = 0; i < _depth; i++) tty->print("   ");
  tty->print("%s: %d  trip_count: %6.0f freq: %6.0f\n",
             _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq);
  for (int i = 0; i < _depth; i++) tty->print("   ");
  tty->print("         members:");
  int k = 0;
  for (int i = 0; i < _members.length(); i++) {
    if (k++ >= 6) {
      tty->print("\n              ");
      for (int j = 0; j < _depth+1; j++) tty->print("   ");
      k = 0;
    }
    CFGElement *s = _members.at(i);
    if (s->is_block()) {
      Block *b = s->as_Block();
      tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq);
    } else {
      CFGLoop* lp = s->as_CFGLoop();
      tty->print(" L%d(%6.3f)", lp->_id, lp->_freq);
    }
  }
  tty->print("\n");
  for (int i = 0; i < _depth; i++) tty->print("   ");
  tty->print("         exits:  ");
  k = 0;
  for (int i = 0; i < _exits.length(); i++) {
    if (k++ >= 7) {
      tty->print("\n              ");
      for (int j = 0; j < _depth+1; j++) tty->print("   ");
      k = 0;
    }
    Block *blk = _exits.at(i).get_target();
    double prob = _exits.at(i).get_prob();
    tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
  }
  tty->print("\n");
}
#endif