/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/machnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/chaitin.hpp"
#include "runtime/deoptimization.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

// To avoid float value underflow
#define MIN_BLOCK_FREQUENCY 1.e-35f

//----------------------------schedule_node_into_block-------------------------
// Insert node n into block b. Look for projections of n and make sure they
// are in b also.
void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
  // Set basic block of n and add n to b.
  map_node_to_block(n, b);
  b->add_inst(n);

  // After Matching, nearly any old Node may have projections trailing it.
  // These are usually machine-dependent flags.  In any case, they might
  // float to another block below this one.  Move them up.
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* use = n->fast_out(i);
    if (use->is_Proj()) {
      Block* buse = get_block_for_node(use);
      if (buse != b) {              // In wrong block?
        if (buse != NULL) {
          buse->find_remove(use);   // Remove from wrong block
        }
        map_node_to_block(use, b);
        b->add_inst(use);
      }
    }
  }
}

//----------------------------replace_block_proj_ctrl-------------------------
// Nodes that have is_block_proj() nodes as their control need to use
// the appropriate Region for their actual block as their control since
// the projection will be in a predecessor block.
void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
  const Node *in0 = n->in(0);
  assert(in0 != NULL, "Only control-dependent");
  const Node *p = in0->is_block_proj();
  if (p != NULL && p != n) {    // Control from a block projection?
    assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
    // Find trailing Region
    Block *pb = get_block_for_node(in0); // Block-projection already has basic block
    uint j = 0;
    if (pb->_num_succs != 1) {  // More than 1 successor?
      // Search for successor
      uint max = pb->number_of_nodes();
      assert( max > 1, "" );
      uint start = max - pb->_num_succs;
      // Find which output path belongs to projection
      for (j = start; j < max; j++) {
        if( pb->get_node(j) == in0 )
          break;
      }
      assert( j < max, "must find" );
      // Change control to match head of successor basic block
      j -= start;
    }
    n->set_req(0, pb->_succs[j]->head());
  }
}

static bool is_dominator(Block* d, Block* n) {
  return d->dom_lca(n) == d;
}

//------------------------------schedule_pinned_nodes--------------------------
// Set the basic block for Nodes pinned into blocks
void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
  // Allocate node stack of size C->live_nodes()+8 to avoid frequent realloc
  GrowableArray <Node *> spstack(C->live_nodes() + 8);
  spstack.push(_root);
  while (spstack.is_nonempty()) {
    Node* node = spstack.pop();
    if (!visited.test_set(node->_idx)) { // Test node and flag it as visited
      if (node->pinned() && !has_block(node)) {  // Pinned?  Nail it down!
        assert(node->in(0), "pinned Node must have Control");
        // Before setting block replace block_proj control edge
        replace_block_proj_ctrl(node);
        Node* input = node->in(0);
        while (!input->is_block_start()) {
          input = input->in(0);
        }
        Block* block = get_block_for_node(input); // Basic block of controlling input
        schedule_node_into_block(node, block);
      }

      // If the node has precedence edges (added when CastPP nodes are
      // removed in final_graph_reshaping), fix the control of the
      // node to cover the precedence edges and remove the
      // dependencies.
      Node* n = NULL;
      for (uint i = node->len()-1; i >= node->req(); i--) {
        Node* m = node->in(i);
        if (m == NULL) continue;
        // Skip the precedence edge if the test that guarded a CastPP:
        // - was optimized out during escape analysis
        //   (OptimizePtrCompare): the CastPP's control isn't an end of
        //   block.
        // - is moved in the branch of a dominating If: the control of
        //   the CastPP is then a Region.
        if (m->is_block_proj() || m->is_block_start()) {
          node->rm_prec(i);
          if (n == NULL) {
            n = m;
          } else {
            Block* bn = get_block_for_node(n);
            Block* bm = get_block_for_node(m);
            assert(is_dominator(bn, bm) || is_dominator(bm, bn), "one must dominate the other");
            n = is_dominator(bn, bm) ? m : n;
          }
        }
      }
      if (n != NULL) {
        assert(node->in(0), "control should have been set");
        Block* bn = get_block_for_node(n);
        Block* bnode = get_block_for_node(node->in(0));
        assert(is_dominator(bn, bnode) || is_dominator(bnode, bn), "one must dominate the other");
        if (!is_dominator(bn, bnode)) {
          node->set_req(0, n);
        }
      }

      // process all inputs that are non NULL
      for (int i = node->req() - 1; i >= 0; --i) {
        if (node->in(i) != NULL) {
          spstack.push(node->in(i));
        }
      }
    }
  }
}

#ifdef ASSERT
// Assert that new input b2 is dominated by all previous inputs.
// Check this by seeing that it is dominated by b1, the deepest
// input observed until b2.
static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
  if (b1 == NULL)  return;
  assert(b1->_dom_depth < b2->_dom_depth, "sanity");
  Block* tmp = b2;
  while (tmp != b1 && tmp != NULL) {
    tmp = tmp->_idom;
  }
  if (tmp != b1) {
    // Detected an unschedulable graph.  Print some nice stuff and die.
    tty->print_cr("!!! Unschedulable graph !!!");
    for (uint j=0; j<n->len(); j++) { // For all inputs
      Node* inn = n->in(j);           // Get input
      if (inn == NULL)  continue;     // Ignore NULL, missing inputs
      Block* inb = cfg->get_block_for_node(inn);
      tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
                 inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
      inn->dump();
    }
    tty->print("Failing node: ");
    n->dump();
    assert(false, "unschedulable graph");
  }
}
#endif

static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
  // Find the last input dominated by all other inputs.
  Block* deepb           = NULL;        // Deepest block so far
  int    deepb_dom_depth = 0;
  for (uint k = 0; k < n->len(); k++) { // For all inputs
    Node* inn = n->in(k);               // Get input
    if (inn == NULL)  continue;         // Ignore NULL, missing inputs
    Block* inb = cfg->get_block_for_node(inn);
    assert(inb != NULL, "must already have scheduled this input");
    if (deepb_dom_depth < (int) inb->_dom_depth) {
      // The new inb must be dominated by the previous deepb.
      // The various inputs must be linearly ordered in the dom
      // tree, or else there will not be a unique deepest block.
      DEBUG_ONLY(assert_dom(deepb, inb, n, cfg));
      deepb = inb;                      // Save deepest block
      deepb_dom_depth = deepb->_dom_depth;
    }
  }
  assert(deepb != NULL, "must be at least one input to n");
  return deepb;
}


//------------------------------schedule_early---------------------------------
// Find the earliest Block any instruction can be placed in.  Some instructions
// are pinned into Blocks.  Unpinned instructions can appear in the last block
// in which all their inputs occur.
bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) {
  // Allocate stack with enough space to avoid frequent realloc
  Node_Stack nstack(roots.Size() + 8);
  // _root will be processed among C->top() inputs
  roots.push(C->top());
  visited.set(C->top()->_idx);

  while (roots.size() != 0) {
    // Use local variables parent_node & input_index to cache the values
    // on the stack's top.
    Node* parent_node = roots.pop();
    uint  input_index = 0;

    while (true) {
      if (input_index == 0) {
        // Fixup some control.  Constants without control get attached
        // to root and nodes that use is_block_proj() nodes should be attached
        // to the region that starts their block.
        const Node* control_input = parent_node->in(0);
        if (control_input != NULL) {
          replace_block_proj_ctrl(parent_node);
        } else {
          // Is a constant with NO inputs?
          if (parent_node->req() == 1) {
            parent_node->set_req(0, _root);
          }
        }
      }

      // First, visit all inputs and force them to get a block.  If an
      // input is already in a block we quit following inputs (to avoid
      // cycles). Instead we put that Node on a worklist to be handled
      // later (since ITS inputs may not have a block yet).

      // Assume all n's inputs will be processed
      bool done = true;

      while (input_index < parent_node->len()) {
        Node* in = parent_node->in(input_index++);
        if (in == NULL) {
          continue;
        }

        int is_visited = visited.test_set(in->_idx);
        if (!has_block(in)) {
          if (is_visited) {
            return false;
          }
          // Save parent node and next input's index.
          nstack.push(parent_node, input_index);
          // Process current input now.
          parent_node = in;
          input_index = 0;
          // Not all n's inputs processed.
          done = false;
          break;
        } else if (!is_visited) {
          // Visit this guy later, using worklist
          roots.push(in);
        }
      }

      if (done) {
        // All of n's inputs have been processed, complete post-processing.

        // Some instructions are pinned into a block.  These include Region,
        // Phi, Start, Return, and other control-dependent instructions and
        // any projections which depend on them.
        if (!parent_node->pinned()) {
          // Set earliest legal block.
          Block* earliest_block = find_deepest_input(parent_node, this);
          map_node_to_block(parent_node, earliest_block);
        } else {
          assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge");
        }

        if (nstack.is_empty()) {
          // Finished all nodes on stack.
          // Process next node on the worklist 'roots'.
          break;
        }
        // Get saved parent node and next input's index.
        parent_node = nstack.node();
        input_index = nstack.index();
        nstack.pop();
      }
    }
  }
  return true;
}

//------------------------------dom_lca----------------------------------------
// Find least common ancestor in dominator tree
// LCA is a current notion of LCA, to be raised above 'this'.
// As a convenient boundary condition, return 'this' if LCA is NULL.
// Find the LCA of those two nodes.
Block* Block::dom_lca(Block* LCA) {
  if (LCA == NULL || LCA == this)  return this;

  Block* anc = this;
  while (anc->_dom_depth > LCA->_dom_depth)
    anc = anc->_idom;           // Walk up till anc is as high as LCA

  while (LCA->_dom_depth > anc->_dom_depth)
    LCA = LCA->_idom;           // Walk up till LCA is as high as anc

  while (LCA != anc) {          // Walk both up till they are the same
    LCA = LCA->_idom;
    anc = anc->_idom;
  }

  return LCA;
}

//--------------------------raise_LCA_above_use--------------------------------
// We are placing a definition, and have been given a def->use edge.
// The definition must dominate the use, so move the LCA upward in the
// dominator tree to dominate the use.  If the use is a phi, adjust
// the LCA only with the phi input paths which actually use this def.
static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
  Block* buse = cfg->get_block_for_node(use);
  if (buse == NULL) return LCA;   // Unused killing Projs have no use block
  if (!use->is_Phi())  return buse->dom_lca(LCA);
  uint pmax = use->req();       // Number of Phi inputs
  // Why doesn't this loop just break after finding the matching input to
  // the Phi?  Well...it's like this.  I do not have true def-use/use-def
  // chains.  Means I cannot distinguish, from the def-use direction, which
  // of many use-defs lead from the same use to the same def.  That is, this
  // Phi might have several uses of the same def.  Each use appears in a
  // different predecessor block.  But when I enter here, I cannot distinguish
  // which use-def edge I should find the predecessor block for.  So I find
  // them all.  Means I do a little extra work if a Phi uses the same value
  // more than once.
  for (uint j=1; j<pmax; j++) { // For all inputs
    if (use->in(j) == def) {    // Found matching input?
      Block* pred = cfg->get_block_for_node(buse->pred(j));
      LCA = pred->dom_lca(LCA);
    }
  }
  return LCA;
}

//----------------------------raise_LCA_above_marks----------------------------
// Return a new LCA that dominates LCA and any of its marked predecessors.
// Search all my parents up to 'early' (exclusive), looking for predecessors
// which are marked with the given index.  Return the LCA (in the dom tree)
// of all marked blocks.  If there are none marked, return the original
// LCA.
static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
  Block_List worklist;
  worklist.push(LCA);
  while (worklist.size() > 0) {
    Block* mid = worklist.pop();
    if (mid == early)  continue;  // stop searching here

    // Test and set the visited bit.
    if (mid->raise_LCA_visited() == mark)  continue;  // already visited

    // Don't process the current LCA, otherwise the search may terminate early
    if (mid != LCA && mid->raise_LCA_mark() == mark) {
      // Raise the LCA.
      LCA = mid->dom_lca(LCA);
      if (LCA == early)  break;   // stop searching everywhere
      assert(early->dominates(LCA), "early is high enough");
      // Resume searching at that point, skipping intermediate levels.
      worklist.push(LCA);
      if (LCA == mid)
        continue; // Don't mark as visited to avoid early termination.
    } else {
      // Keep searching through this block's predecessors.
      for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
        Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
        worklist.push(mid_parent);
      }
    }
    mid->set_raise_LCA_visited(mark);
  }
  return LCA;
}

//--------------------------memory_early_block--------------------------------
// This is a variation of find_deepest_input, the heart of schedule_early.
// Find the "early" block for a load, if we considered only memory and
// address inputs, that is, if other data inputs were ignored.
//
// Because a subset of edges are considered, the resulting block will
// be earlier (at a shallower dom_depth) than the true schedule_early
// point of the node. We compute this earlier block as a more permissive
// site for anti-dependency insertion, but only if subsume_loads is enabled.
static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
  Node* base;
  Node* index;
  Node* store = load->in(MemNode::Memory);
  load->as_Mach()->memory_inputs(base, index);

  assert(base != NodeSentinel && index != NodeSentinel,
         "unexpected base/index inputs");

  Node* mem_inputs[4];
  int mem_inputs_length = 0;
  if (base != NULL)  mem_inputs[mem_inputs_length++] = base;
  if (index != NULL) mem_inputs[mem_inputs_length++] = index;
  if (store != NULL) mem_inputs[mem_inputs_length++] = store;

  // In the comparison below, add one to account for the control input,
  // which may be null, but always takes up a spot in the in array.
  if (mem_inputs_length + 1 < (int) load->req()) {
    // This "load" has more inputs than just the memory, base and index inputs.
    // For purposes of checking anti-dependences, we need to start
    // from the early block of only the address portion of the instruction,
    // and ignore other blocks that may have factored into the wider
    // schedule_early calculation.
    if (load->in(0) != NULL) mem_inputs[mem_inputs_length++] = load->in(0);

    Block* deepb           = NULL;        // Deepest block so far
    int    deepb_dom_depth = 0;
    for (int i = 0; i < mem_inputs_length; i++) {
      Block* inb = cfg->get_block_for_node(mem_inputs[i]);
      if (deepb_dom_depth < (int) inb->_dom_depth) {
        // The new inb must be dominated by the previous deepb.
        // The various inputs must be linearly ordered in the dom
        // tree, or else there will not be a unique deepest block.
        DEBUG_ONLY(assert_dom(deepb, inb, load, cfg));
        deepb = inb;                      // Save deepest block
        deepb_dom_depth = deepb->_dom_depth;
      }
    }
    early = deepb;
  }

  return early;
}

//--------------------------insert_anti_dependences---------------------------
// A load may need to witness memory that nearby stores can overwrite.
// For each nearby store, either insert an "anti-dependence" edge
// from the load to the store, or else move LCA upward to force the
// load to (eventually) be scheduled in a block above the store.
//
// Do not add edges to stores on distinct control-flow paths;
// only add edges to stores which might interfere.
//
// Return the (updated) LCA.  There will not be any possibly interfering
// store between the load's "early block" and the updated LCA.
// Any stores in the updated LCA will have new precedence edges
// back to the load.  The caller is expected to schedule the load
// in the LCA, in which case the precedence edges will make LCM
// preserve anti-dependences.  The caller may also hoist the load
// above the LCA, if it is not the early block.
Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
  assert(load->needs_anti_dependence_check(), "must be a load of some sort");
  assert(LCA != NULL, "");
  DEBUG_ONLY(Block* LCA_orig = LCA);

  // Compute the alias index.  Loads and stores with different alias indices
  // do not need anti-dependence edges.
  int load_alias_idx = C->get_alias_index(load->adr_type());
#ifdef ASSERT
  if (load_alias_idx == Compile::AliasIdxBot && C->AliasLevel() > 0 &&
      (PrintOpto || VerifyAliases ||
       PrintMiscellaneous && (WizardMode || Verbose))) {
    // Load nodes should not consume all of memory.
    // Reporting a bottom type indicates a bug in adlc.
    // If some particular type of node validly consumes all of memory,
    // sharpen the preceding "if" to exclude it, so we can catch bugs here.
    tty->print_cr("*** Possible Anti-Dependence Bug: Load consumes all of memory.");
    load->dump(2);
    if (VerifyAliases)  assert(load_alias_idx != Compile::AliasIdxBot, "");
  }
#endif
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrComp),
         "String compare is only known 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrEquals),
         "String equals is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrIndexOf),
         "String indexOf is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrIndexOfChar),
         "String indexOfChar is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_AryEq),
         "Arrays equals is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_HasNegatives),
         "HasNegatives is a 'load' that does not conflict with any stores");

  if (!C->alias_type(load_alias_idx)->is_rewritable()) {
    // It is impossible to spoil this load by putting stores before it,
    // because we know that the stores will never update the value
    // which 'load' must witness.
    return LCA;
  }

  node_idx_t load_index = load->_idx;

  // Note the earliest legal placement of 'load', as determined by
  // the unique point in the dom tree where all memory effects
  // and other inputs are first available.  (Computed by schedule_early.)
  // For normal loads, 'early' is the shallowest place (dom graph wise)
  // to look for anti-deps between this load and any store.
  Block* early = get_block_for_node(load);

  // If we are subsuming loads, compute an "early" block that only considers
  // memory or address inputs. This block may be different from the
  // schedule_early block in that it could be at an even shallower depth in the
  // dominator tree, and allow for a broader discovery of anti-dependences.
  if (C->subsume_loads()) {
    early = memory_early_block(load, early, this);
  }

  ResourceArea *area = Thread::current()->resource_area();
  Node_List worklist_mem(area);     // prior memory state to store
  Node_List worklist_store(area);   // possible-def to explore
  Node_List worklist_visited(area); // visited mergemem nodes
  Node_List non_early_stores(area); // all relevant stores outside of early
  bool must_raise_LCA = false;

#ifdef TRACK_PHI_INPUTS
  // %%% This extra checking fails because MergeMem nodes are not GVNed.
  // Provide "phi_inputs" to check if every input to a PhiNode is from the
  // original memory state.  This indicates a PhiNode which should not
  // prevent the load from sinking.  For such a block, set_raise_LCA_mark
  // may be overly conservative.
  // Mechanism: count inputs seen for each Phi encountered in worklist_store.
  DEBUG_ONLY(GrowableArray<uint> phi_inputs(area, C->unique(),0,0));
#endif

  // 'load' uses some memory state; look for users of the same state.
  // Recurse through MergeMem nodes to the stores that use them.

  // Each of these stores is a possible definition of memory
  // that 'load' needs to use.  We need to force 'load'
  // to occur before each such store.
  // When the store is in the same block as 'load', we insert an
  // anti-dependence edge load->store.

  // The relevant stores "nearby" the load consist of a tree rooted
  // at initial_mem, with internal nodes of type MergeMem.
  // Therefore, the branches visited by the worklist are of this form:
  //    initial_mem -> (MergeMem ->)* store
  // The anti-dependence constraints apply only to the fringe of this tree.

  Node* initial_mem = load->in(MemNode::Memory);
  worklist_store.push(initial_mem);
  worklist_visited.push(initial_mem);
  worklist_mem.push(NULL);
  while (worklist_store.size() > 0) {
    // Examine a nearby store to see if it might interfere with our load.
    Node* mem   = worklist_mem.pop();
    Node* store = worklist_store.pop();
    uint op = store->Opcode();

    // MergeMems do not directly have anti-deps.
    // Treat them as internal nodes in a forward tree of memory states,
    // the leaves of which are each a 'possible-def'.
    if (store == initial_mem    // root (exclusive) of tree we are searching
        || op == Op_MergeMem    // internal node of tree we are searching
        ) {
      mem = store;   // It's not a possibly interfering store.
      if (store == initial_mem)
        initial_mem = NULL;  // only process initial memory once

      for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
        store = mem->fast_out(i);
        if (store->is_MergeMem()) {
          // Be sure we don't get into combinatorial problems.
          // (Allow phis to be repeated; they can merge two relevant states.)
          uint j = worklist_visited.size();
          for (; j > 0; j--) {
            if (worklist_visited.at(j-1) == store)  break;
          }
          if (j > 0)  continue; // already on work list; do not repeat
          worklist_visited.push(store);
        }
        worklist_mem.push(mem);
        worklist_store.push(store);
      }
      continue;
    }

    if (op == Op_MachProj || op == Op_Catch)   continue;
    if (store->needs_anti_dependence_check())  continue;  // not really a store

    // Compute the alias index.  Loads and stores with different alias
    // indices do not need anti-dependence edges.  Wide MemBars are
    // anti-dependent on everything (except immutable memories).
    const TypePtr* adr_type = store->adr_type();
    if (!C->can_alias(adr_type, load_alias_idx))  continue;

    // Most slow-path runtime calls do NOT modify Java memory, but
    // they can block and so write Raw memory.
    if (store->is_Mach()) {
      MachNode* mstore = store->as_Mach();
      if (load_alias_idx != Compile::AliasIdxRaw) {
        // Check for call into the runtime using the Java calling
        // convention (and from there into a wrapper); it has no
        // _method.  Can't do this optimization for Native calls because
        // they CAN write to Java memory.
        if (mstore->ideal_Opcode() == Op_CallStaticJava) {
          assert(mstore->is_MachSafePoint(), "");
          MachSafePointNode* ms = (MachSafePointNode*) mstore;
          assert(ms->is_MachCallJava(), "");
          MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
          if (mcj->_method == NULL) {
            // These runtime calls do not write to Java visible memory
            // (other than Raw) and so do not require anti-dependence edges.
            continue;
          }
        }
        // Same for SafePoints: they read/write Raw but only read otherwise.
        // This is basically a workaround for SafePoints only defining control
        // instead of control + memory.
        if (mstore->ideal_Opcode() == Op_SafePoint)
          continue;
      } else {
        // Some raw memory, such as the load of "top" at an allocation,
        // can be control dependent on the previous safepoint. See
        // comments in GraphKit::allocate_heap() about control input.
        // Inserting an anti-dep between such a safepoint and a use
        // creates a cycle, and will cause a subsequent failure in
        // local scheduling.  (BugId 4919904)
        // (%%% How can a control input be a safepoint and not a projection??)
        if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore)
          continue;
      }
    }

    // Identify a block that the current load must be above,
    // or else observe that 'store' is all the way up in the
    // earliest legal block for 'load'.  In the latter case,
    // immediately insert an anti-dependence edge.
    Block* store_block = get_block_for_node(store);
    assert(store_block != NULL, "unused killing projections skipped above");

    if (store->is_Phi()) {
      // 'load' uses memory which is one (or more) of the Phi's inputs.
      // It must be scheduled not before the Phi, but rather before
      // each of the relevant Phi inputs.
      //
      // Instead of finding the LCA of all inputs to a Phi that match 'mem',
      // we mark each corresponding predecessor block and do a combined
      // hoisting operation later (raise_LCA_above_marks).
      //
      // Do not assert(store_block != early, "Phi merging memory after access")
      // PhiNode may be at start of block 'early' with backedge to 'early'
      DEBUG_ONLY(bool found_match = false);
      for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
        if (store->in(j) == mem) {   // Found matching input?
          DEBUG_ONLY(found_match = true);
          Block* pred_block = get_block_for_node(store_block->pred(j));
          if (pred_block != early) {
            // If any predecessor of the Phi matches the load's "early block",
            // we do not need a precedence edge between the Phi and 'load'
            // since the load will be forced into a block preceding the Phi.
            pred_block->set_raise_LCA_mark(load_index);
            assert(!LCA_orig->dominates(pred_block) ||
                   early->dominates(pred_block), "early is high enough");
            must_raise_LCA = true;
          } else {
            // anti-dependent upon PHI pinned below 'early', no edge needed
            LCA = early;             // but can not schedule below 'early'
          }
        }
      }
      assert(found_match, "no worklist bug");
#ifdef TRACK_PHI_INPUTS
#ifdef ASSERT
      // This assert asks about correct handling of PhiNodes, which may not
      // have all input edges directly from 'mem'. See BugId 4621264
      int num_mem_inputs = phi_inputs.at_grow(store->_idx,0) + 1;
      // Increment by exactly one even if there are multiple copies of 'mem'
      // coming into the phi, because we will run this block several times
      // if there are several copies of 'mem'.  (That's how DU iterators work.)
      phi_inputs.at_put(store->_idx, num_mem_inputs);
      assert(PhiNode::Input + num_mem_inputs < store->req(),
             "Expect at least one phi input will not be from original memory state");
#endif //ASSERT
#endif //TRACK_PHI_INPUTS
    } else if (store_block != early) {
      // 'store' is between the current LCA and earliest possible block.
      // Label its block, and decide later on how to raise the LCA
      // to include the effect on LCA of this store.
      // If this store's block gets chosen as the raised LCA, we
      // will find him on the non_early_stores list and stick him
      // with a precedence edge.
      // (But, don't bother if LCA is already raised all the way.)
      if (LCA != early) {
        store_block->set_raise_LCA_mark(load_index);
        must_raise_LCA = true;
        non_early_stores.push(store);
      }
    } else {
      // Found a possibly-interfering store in the load's 'early' block.
      // This means 'load' cannot sink at all in the dominator tree.
      // Add an anti-dep edge, and squeeze 'load' into the highest block.
      assert(store != load->in(0), "dependence cycle found");
      if (verify) {
        assert(store->find_edge(load) != -1, "missing precedence edge");
      } else {
        store->add_prec(load);
      }
      LCA = early;
      // This turns off the process of gathering non_early_stores.
    }
  }
  // (Worklist is now empty; all nearby stores have been visited.)

  // Finished if 'load' must be scheduled in its 'early' block.
  // If we found any stores there, they have already been given
  // precedence edges.
  if (LCA == early)  return LCA;

  // We get here only if there are no possibly-interfering stores
  // in the load's 'early' block.  Move LCA up above all predecessors
  // which contain stores we have noted.
  //
  // The raised LCA block can be a home to such interfering stores,
  // but its predecessors must not contain any such stores.
  //
  // The raised LCA will be a lower bound for placing the load,
  // preventing the load from sinking past any block containing
  // a store that may invalidate the memory state required by 'load'.
  if (must_raise_LCA)
    LCA = raise_LCA_above_marks(LCA, load->_idx, early, this);
  if (LCA == early)  return LCA;

  // Insert anti-dependence edges from 'load' to each store
  // in the non-early LCA block.
  // Mine the non_early_stores list for such stores.
  if (LCA->raise_LCA_mark() == load_index) {
    while (non_early_stores.size() > 0) {
      Node* store = non_early_stores.pop();
      Block* store_block = get_block_for_node(store);
      if (store_block == LCA) {
        // add anti_dependence from store to load in its own block
        assert(store != load->in(0), "dependence cycle found");
        if (verify) {
          assert(store->find_edge(load) != -1, "missing precedence edge");
        } else {
          store->add_prec(load);
        }
      } else {
        assert(store_block->raise_LCA_mark() == load_index, "block was marked");
        // Any other stores we found must be either inside the new LCA
        // or else outside the original LCA.  In the latter case, they
        // did not interfere with any use of 'load'.
        assert(LCA->dominates(store_block)
               || !LCA_orig->dominates(store_block), "no stray stores");
      }
    }
  }

  // Return the highest block containing stores; any stores
  // within that block have been given anti-dependence edges.
  return LCA;
}

// This class is used to iterate backwards over the nodes in the graph.
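// Nodes are produced in (roughly) post-order over the def-use graph: a node
// is returned only after all of its still-reachable uses have been returned.
// Uses reached through backward control edges, or same-block Phis, are
// skipped to keep the walk acyclic.  This is the order that the backwards
// latency computation and late scheduling below rely on.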

class Node_Backward_Iterator {

private:
  Node_Backward_Iterator();

public:
  // Constructor for the iterator
  Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg);

  // Step the iterator and return the next node (NULL when the walk is done)
  Node *next();

private:
  VectorSet &_visited;
  Node_List &_stack;
  PhaseCFG  &_cfg;
};

// Constructor for the Node_Backward_Iterator
Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg)
  : _visited(visited), _stack(stack), _cfg(cfg) {
  // The stack should contain exactly the root
  stack.clear();
  stack.push(root);

  // Clear the visited bits
  visited.Clear();
}

// Iterator for the Node_Backward_Iterator
Node *Node_Backward_Iterator::next() {

  // If the _stack is empty, then just return NULL: finished.
  if ( !_stack.size() )
    return NULL;

  // '_stack' is emulating a real _stack.  The 'visit-all-users' loop has been
  // made stateless, so I do not need to record the index 'i' on my _stack.
  // Instead I visit all users each time, scanning for unvisited users.
  // I visit unvisited not-anti-dependence users first, then anti-dependent
  // children next.
  Node *self = _stack.pop();

  // I cycle here when I am entering a deeper level of recursion.
  // The key variable 'self' was set prior to jumping here.
  while( 1 ) {

    _visited.set(self->_idx);

    // Now schedule all uses as late as possible.
    const Node* src = self->is_Proj() ? self->in(0) : self;
    uint src_rpo = _cfg.get_block_for_node(src)->_rpo;

    // Schedule all nodes in a post-order visit
    Node *unvisited = NULL;  // Unvisited anti-dependent Node, if any

    // Scan for unvisited nodes
    for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
      // For all uses, schedule late
      Node* n = self->fast_out(i); // Use

      // Skip already visited children
      if ( _visited.test(n->_idx) )
        continue;

      // do not traverse backward control edges
      Node *use = n->is_Proj() ? n->in(0) : n;
      uint use_rpo = _cfg.get_block_for_node(use)->_rpo;

      if ( use_rpo < src_rpo )
        continue;

      // Phi nodes always precede uses in a basic block
      if ( use_rpo == src_rpo && use->is_Phi() )
        continue;

      unvisited = n;      // Found unvisited

      // Check for possible-anti-dependent
      if( !n->needs_anti_dependence_check() )
        break;            // Not visited, not anti-dep; schedule it NOW
    }

    // Did I find an unvisited not-anti-dependent Node?
    if ( !unvisited )
      break;                  // All done with children; post-visit 'self'

    // Visit the unvisited Node.  Contains the obvious push to
    // indicate I'm entering a deeper level of recursion.  I push the
    // old state onto the _stack and set a new state and loop (recurse).
    _stack.push(self);
    self = unvisited;
  } // End recursion loop

  return self;
}

//------------------------------ComputeLatenciesBackwards----------------------
// Compute the latency of all the instructions.
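// Latencies start at zero and are propagated from uses back to defs
// (partial_latency_of_defs), so when the walk finishes a def's latency
// reflects the longest latency-weighted path from it to any of its uses.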
void PhaseCFG::compute_latencies_backwards(VectorSet &visited, Node_List &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- ComputeLatenciesBackwards ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
  Node *n;

  // Walk over all the nodes from last to first
  while ((n = iter.next()) != NULL) {
    // Set the latency for the definitions of this instruction
    partial_latency_of_defs(n);
  }
} // end ComputeLatenciesBackwards

//------------------------------partial_latency_of_defs------------------------
// Compute the latency impact of this node on all defs.  This computes
// a number that increases as we approach the beginning of the routine.
void PhaseCFG::partial_latency_of_defs(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_to_inputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
    dump();
  }
#endif

  if (n->is_Proj()) {
    n = n->in(0);
  }

  if (n->is_Root()) {
    return;
  }

  uint nlen = n->len();
  uint use_latency = get_latency_for_node(n);
  uint use_pre_order = get_block_for_node(n)->_pre_order;

  for (uint j = 0; j < nlen; j++) {
    Node *def = n->in(j);

    if (!def || def == n) {
      continue;
    }

    // Walk backwards through projections
    if (def->is_Proj()) {
      def = def->in(0);
    }

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("#    in(%2d): ", j);
      def->dump();
    }
#endif

    // If the defining block is not known, assume it is ok
    Block *def_block = get_block_for_node(def);
    uint def_pre_order = def_block ? def_block->_pre_order : 0;

    if ((use_pre_order < def_pre_order) || (use_pre_order == def_pre_order && n->is_Phi())) {
      continue;
    }

    uint delta_latency = n->latency(j);
    uint current_latency = delta_latency + use_latency;

    if (get_latency_for_node(def) < current_latency) {
      set_latency_for_node(def, current_latency);
    }

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", use_latency, j, delta_latency, current_latency, def->_idx, get_latency_for_node(def));
    }
#endif
  }
}

//------------------------------latency_from_use-------------------------------
// Compute the latency of a specific use
int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
  // If self-reference, return no latency
  if (use == n || use->is_Root()) {
    return 0;
  }

  uint def_pre_order = get_block_for_node(def)->_pre_order;
  uint latency = 0;

  // If the use is not a projection, then it is simple...
  if (!use->is_Proj()) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("#    out(): ");
      use->dump();
    }
#endif

    uint use_pre_order = get_block_for_node(use)->_pre_order;

    if (use_pre_order < def_pre_order)
      return 0;

    if (use_pre_order == def_pre_order && use->is_Phi())
      return 0;

    uint nlen = use->len();
    uint nl = get_latency_for_node(use);

    for ( uint j=0; j<nlen; j++ ) {
      if (use->in(j) == n) {
        // Change this if we want local latencies
        uint ul = use->latency(j);
        uint  l = ul + nl;
        if (latency < l) latency = l;
#ifndef PRODUCT
        if (trace_opto_pipelining()) {
          tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, latency = %d",
                        nl, j, ul, l, latency);
        }
#endif
      }
    }
  } else {
    // This is a projection, just grab the latency of the use(s)
    for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
      uint l = latency_from_use(use, def, use->fast_out(j));
      if (latency < l) latency = l;
    }
  }

  return latency;
}

//------------------------------latency_from_uses------------------------------
// Compute the latency of this instruction relative to all of its uses.
// This computes a number that increases as we approach the beginning of the
// routine.
void PhaseCFG::latency_from_uses(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_from_outputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
    dump();
  }
#endif
  uint latency = 0;
  const Node *def = n->is_Proj() ? n->in(0): n;

  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    uint l = latency_from_use(n, def, n->fast_out(i));

    if (latency < l) latency = l;
  }

  set_latency_for_node(n, latency);
}

//------------------------------hoist_to_cheaper_block-------------------------
// Pick a block for node self, between early and LCA, that is a cheaper
// alternative to LCA.
Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
  const double delta = 1+PROB_UNLIKELY_MAG(4);
  Block* least       = LCA;
  double least_freq  = least->_freq;
  uint target        = get_latency_for_node(self);
  uint start_latency = get_latency_for_node(LCA->head());
  uint end_latency   = get_latency_for_node(LCA->get_node(LCA->end_idx()));
  bool in_latency    = (target <= start_latency);
  const Block* root_block = get_block_for_node(_root);

  // Turn off latency scheduling if scheduling is just plain off
  if (!C->do_scheduling())
    in_latency = true;

  // Do not hoist (to cover latency) instructions which target a
  // single register.  Hoisting stretches the live range of the
  // single register and may force spilling.
  MachNode* mach = self->is_Mach() ?
    self->as_Mach() : NULL;
  if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty())
    in_latency = true;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# Find cheaper block for latency %d: ", get_latency_for_node(self));
    self->dump();
    tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
      LCA->_pre_order,
      LCA->head()->_idx,
      start_latency,
      LCA->get_node(LCA->end_idx())->_idx,
      end_latency,
      least_freq);
  }
#endif

  int cand_cnt = 0;  // number of candidates tried

  // Walk up the dominator tree from LCA (Lowest common ancestor) to
  // the earliest legal location.  Capture the least execution frequency.
  while (LCA != early) {
    LCA = LCA->_idom;         // Follow up the dominator tree

    if (LCA == NULL) {
      // Bailout without retry
      C->record_method_not_compilable("late schedule failed: LCA == NULL");
      return least;
    }

    // Don't hoist machine instructions to the root basic block
    if (mach && LCA == root_block)
      break;

    uint start_lat = get_latency_for_node(LCA->head());
    uint end_idx   = LCA->end_idx();
    uint end_lat   = get_latency_for_node(LCA->get_node(end_idx));
    double LCA_freq = LCA->_freq;
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
        LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq);
    }
#endif
    cand_cnt++;
    if (LCA_freq < least_freq              || // Better Frequency
        (StressGCM && Compile::randomized_select(cand_cnt)) || // Should be randomly accepted in stress mode
        (!StressGCM                    &&    // Otherwise, choose with latency
         !in_latency                   &&    // No block containing latency
         LCA_freq < least_freq * delta &&    // No worse frequency
         target >= end_lat             &&    // within latency range
         !self->is_iteratively_computed() )  // But don't hoist IV increments
             // because they may end up above other uses of their phi forcing
             // their result register to be different from their input.
       ) {
      least = LCA;            // Found cheaper block
      least_freq = LCA_freq;
      start_latency = start_lat;
      end_latency = end_lat;
      if (target <= start_lat)
        in_latency = true;
    }
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print_cr("#  Choose block B%d with start latency=%d and freq=%g",
      least->_pre_order, start_latency, least_freq);
  }
#endif

  // See if the latency needs to be updated
  if (target < end_latency) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
    }
#endif
    set_latency_for_node(self, end_latency);
    partial_latency_of_defs(self);
  }

  return least;
}


//------------------------------schedule_late-----------------------------------
// Now schedule all codes as LATE as possible.  This is the LCA in the
// dominator tree of all USES of a value.  Pick the block with the least
// loop nesting depth that is lowest in the dominator tree.
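// (When hoisting is possible, hoist_to_cheaper_block above walks from the
// LCA of the uses up to 'early' and keeps the candidate block with the
// lowest execution frequency.)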
extern const char must_clone[];
void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- schedule_late ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
  Node *self;

  // Walk over all the nodes from last to first
  while ((self = iter.next()) != NULL) {
    Block* early = get_block_for_node(self); // Earliest legal placement

    if (self->is_top()) {
      // Top node goes in bb #2 with other constants.
      // It must be special-cased, because it has no out edges.
      early->add_inst(self);
      continue;
    }

    // No uses, just terminate
    if (self->outcnt() == 0) {
      assert(self->is_MachProj(), "sanity");
      continue;                   // Must be a dead machine projection
    }

    // If node is pinned in the block, then no scheduling can be done.
    if( self->pinned() )          // Pinned in block?
      continue;

    MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
    if (mach) {
      switch (mach->ideal_Opcode()) {
      case Op_CreateEx:
        // Don't move exception creation
        early->add_inst(self);
        continue;
        break;
      case Op_CheckCastPP:
        // Don't move CheckCastPP nodes away from their input, if the input
        // is a rawptr (5071820).
        Node *def = self->in(1);
        if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
          early->add_inst(self);
#ifdef ASSERT
          _raw_oops.push(def);
#endif
          continue;
        }
        break;
      }
    }

    // Gather LCA of all uses
    Block *LCA = NULL;
    {
      for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
        // For all uses, find LCA
        Node* use = self->fast_out(i);
        LCA = raise_LCA_above_use(LCA, use, self, this);
      }
    }  // (Hide defs of imax, i from rest of block.)

    // Place temps in the block of their use.  This isn't a
    // requirement for correctness but it reduces useless
    // interference between temps and other nodes.
    if (mach != NULL && mach->is_MachTemp()) {
      map_node_to_block(self, LCA);
      LCA->add_inst(self);
      continue;
    }

    // Check if 'self' could be anti-dependent on memory
    if (self->needs_anti_dependence_check()) {
      // Hoist LCA above possible-defs and insert anti-dependences to
      // defs in new LCA block.
      LCA = insert_anti_dependences(LCA, self);
    }

    if (early->_dom_depth > LCA->_dom_depth) {
      // Somehow the LCA has moved above the earliest legal point.
      // (One way this can happen is via memory_early_block.)
      if (C->subsume_loads() == true && !C->failing()) {
        // Retry with subsume_loads == false
        // If this is the first failure, the sentinel string will "stick"
        // to the Compile object, and the C2Compiler will see it and retry.
        C->record_failure(C2Compiler::retry_no_subsuming_loads());
      } else {
        // Bailout without retry when (early->_dom_depth > LCA->_dom_depth)
        C->record_method_not_compilable("late schedule failed: incorrect graph");
      }
      return;
    }

    // If there is no opportunity to hoist, then we're done.
    // In stress mode, try to hoist even the single operations.
    bool try_to_hoist = StressGCM || (LCA != early);

    // Must clone guys stay next to use; no hoisting allowed.
    // Also cannot hoist guys that alter memory or are otherwise not
    // allocatable (hoisting can make a value live longer, leading to
    // anti and output dependency problems which are normally resolved
    // by the register allocator giving everyone a different register).
    if (mach != NULL && must_clone[mach->ideal_Opcode()])
      try_to_hoist = false;

    Block* late = NULL;
    if (try_to_hoist) {
      // Now find the block with the least execution frequency.
      // Start at the latest schedule and work up to the earliest schedule
      // in the dominator tree.  Thus the Node will dominate all its uses.
      late = hoist_to_cheaper_block(LCA, early, self);
    } else {
      // Just use the LCA of the uses.
      late = LCA;
    }

    // Put the node into target block
    schedule_node_into_block(self, late);

#ifdef ASSERT
    if (self->needs_anti_dependence_check()) {
      // since precedence edges are only inserted when we're sure they
      // are needed make sure that after placement in a block we don't
      // need any new precedence edges.
      verify_anti_dependences(late, self);
    }
#endif
  } // Loop until all nodes have been visited

} // end ScheduleLate

//------------------------------GlobalCodeMotion-------------------------------
void PhaseCFG::global_code_motion() {
  ResourceMark rm;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Start GlobalCodeMotion ----\n");
  }
#endif

  // Initialize the node to block mapping for things on the proj_list
  for (uint i = 0; i < _matcher.number_of_projections(); i++) {
    unmap_node_from_block(_matcher.get_projection(i));
  }

  // Set the basic block for Nodes pinned into blocks
  Arena* arena = Thread::current()->resource_area();
  VectorSet visited(arena);
  schedule_pinned_nodes(visited);

  // Find the earliest Block any instruction can be placed in.  Some
  // instructions are pinned into Blocks.  Unpinned instructions can
  // appear in the last block in which all their inputs occur.
  visited.Clear();
  Node_List stack(arena);
  // Pre-grow the list
  stack.map((C->live_nodes() >> 1) + 16, NULL);
  if (!schedule_early(visited, stack)) {
    // Bailout without retry
    C->record_method_not_compilable("early schedule failed");
    return;
  }

  // Build Def-Use edges.
  // Compute the latency information (via backwards walk) for all the
  // instructions in the graph
  _node_latency = new GrowableArray<uint>(); // resource_area allocation

  if (C->do_scheduling()) {
    compute_latencies_backwards(visited, stack);
  }

  // Now schedule all codes as LATE as possible.  This is the LCA in the
  // dominator tree of all USES of a value.  Pick the block with the least
  // loop nesting depth that is lowest in the dominator tree.
  // ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() )
  schedule_late(visited, stack);
  if (C->failing()) {
    // schedule_late fails only when graph is incorrect.
    assert(!VerifyGraphEdges, "verification should have failed");
    return;
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Detect implicit null checks ----\n");
  }
#endif

  // Detect implicit-null-check opportunities.  Basically, find NULL checks
  // with suitable memory ops nearby.  Use the memory op to do the NULL check.
  // I can generate a memory op if there is not one nearby.
  if (C->is_method_compilation()) {
    // By reversing the loop direction we get a very minor gain on mpegaudio.
    // Feel free to revert to a forward loop for clarity.
    // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
    for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) {
      Node* proj = _matcher._null_check_tests[i];
      Node* val  = _matcher._null_check_tests[i + 1];
      Block* block = get_block_for_node(proj);
      implicit_null_check(block, proj, val, C->allowed_deopt_reasons());
      // The implicit_null_check will only perform the transformation
      // if the null branch is truly uncommon, *and* it leads to an
      // uncommon trap.  Combined with the too_many_traps guards
      // above, this prevents SEGV storms reported in 6366351,
      // by recompiling offending methods without this optimization.
    }
  }

  bool block_size_threshold_ok = false;
  intptr_t *recalc_pressure_nodes = NULL;
  if (OptoRegScheduling) {
    for (uint i = 0; i < number_of_blocks(); i++) {
      Block* block = get_block(i);
      if (block->number_of_nodes() > 10) {
        block_size_threshold_ok = true;
        break;
      }
    }
  }

  // The register-pressure-aware local scheduler is enabled only when
  // OptoRegScheduling is on and at least one block is large enough to
  // make the extra liveness bookkeeping worthwhile.
  PhaseChaitin regalloc(C->unique(), *this, _matcher, true);
  ResourceArea live_arena;      // Arena for liveness
  ResourceMark rm_live(&live_arena);
  PhaseLive live(*this, regalloc._lrg_map.names(), &live_arena, true);
  PhaseIFG ifg(&live_arena);
  if (OptoRegScheduling && block_size_threshold_ok) {
    regalloc.mark_ssa();
    Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
    rm_live.reset_to_mark();           // Reclaim working storage
    IndexSet::reset_memory(C, &live_arena);
    uint node_size = regalloc._lrg_map.max_lrg_id();
    ifg.init(node_size); // Empty IFG
    regalloc.set_ifg(ifg);
    regalloc.set_live(live);
    regalloc.gather_lrg_masks(false);    // Collect LRG masks
    live.compute(node_size); // Compute liveness

    recalc_pressure_nodes = NEW_RESOURCE_ARRAY(intptr_t, node_size);
    for (uint i = 0; i < node_size; i++) {
      recalc_pressure_nodes[i] = 0;
    }
  }
  _regalloc = &regalloc;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Start Local Scheduling ----\n");
  }
#endif

  // Schedule locally.  Right now a simple topological sort.
  // Later, do a real latency aware scheduler.
  GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1);
  visited.Clear();
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    if (!schedule_local(block, ready_cnt, visited, recalc_pressure_nodes)) {
      if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
        C->record_method_not_compilable("local schedule failed");
      }
      _regalloc = NULL;
      return;
    }
  }
  _regalloc = NULL;

  // If we inserted any instructions between a Call and its CatchNode,
  // clone the instructions on all paths below the Catch.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    call_catch_cleanup(block);
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- After GlobalCodeMotion ----\n");
    for (uint i = 0; i < number_of_blocks(); i++) {
      Block* block = get_block(i);
      block->dump();
    }
  }
#endif
  // Dead.
  _node_latency = (GrowableArray<uint> *)0xdeadbeef;
}

bool PhaseCFG::do_global_code_motion() {

  build_dominator_tree();
  if (C->failing()) {
    return false;
  }

  NOT_PRODUCT( C->verify_graph_edges(); )

  estimate_block_frequency();

  global_code_motion();

  if (C->failing()) {
    return false;
  }

  return true;
}

//------------------------------Estimate_Block_Frequency-----------------------
// Estimate block frequencies based on IfNode probabilities.
void PhaseCFG::estimate_block_frequency() {

  // Force conditional branches leading to uncommon traps to be unlikely,
  // not because we get to the uncommon_trap with less relative frequency,
  // but because an uncommon_trap typically causes a deopt, so we only get
  // there once.
  if (C->do_freq_based_layout()) {
    Block_List worklist;
    Block* root_blk = get_block(0);
    for (uint i = 1; i < root_blk->num_preds(); i++) {
      Block *pb = get_block_for_node(root_blk->pred(i));
      if (pb->has_uncommon_code()) {
        worklist.push(pb);
      }
    }
    while (worklist.size() > 0) {
      Block* uct = worklist.pop();
      if (uct == get_root_block()) {
        continue;
      }
      for (uint i = 1; i < uct->num_preds(); i++) {
        Block *pb = get_block_for_node(uct->pred(i));
        if (pb->_num_succs == 1) {
          worklist.push(pb);
        } else if (pb->num_fall_throughs() == 2) {
          pb->update_uncommon_branch(uct);
        }
      }
    }
  }

  // Create the loop tree and calculate loop depth.
  _root_loop = create_loop_tree();
  _root_loop->compute_loop_depth(0);

  // Compute block frequency of each block, relative to a single loop entry.
  _root_loop->compute_freq();

  // Adjust all frequencies to be relative to a single method entry
  _root_loop->_freq = 1.0;
  _root_loop->scale_freq();

  // Save the outermost loop frequency for the LRG frequency threshold
  _outer_loop_frequency = _root_loop->outer_loop_freq();

  // Force paths ending at uncommon traps to be infrequent
  if (!C->do_freq_based_layout()) {
    Block_List worklist;
    Block* root_blk = get_block(0);
    for (uint i = 1; i < root_blk->num_preds(); i++) {
      Block *pb = get_block_for_node(root_blk->pred(i));
      if (pb->has_uncommon_code()) {
        worklist.push(pb);
      }
    }
    while (worklist.size() > 0) {
      Block* uct = worklist.pop();
      uct->_freq = PROB_MIN;
      for (uint i = 1; i < uct->num_preds(); i++) {
        Block *pb = get_block_for_node(uct->pred(i));
        if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
          worklist.push(pb);
        }
      }
    }
  }

#ifdef ASSERT
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* b = get_block(i);
    assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency");
  }
#endif

#ifndef PRODUCT
  if (PrintCFGBlockFreq) {
    tty->print_cr("CFG Block Frequencies");
    _root_loop->dump_tree();
    if (Verbose) {
      tty->print_cr("PhaseCFG dump");
      dump();
      tty->print_cr("Node dump");
      _root->dump(99999);
    }
  }
#endif
}

//----------------------------create_loop_tree--------------------------------
// Create a loop tree from the CFG
CFGLoop* PhaseCFG::create_loop_tree() {

#ifdef ASSERT
  assert(get_block(0) == get_root_block(), "first block should be root block");
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    // Check that the _loop fields are clear...we could clear them if not.
    assert(block->_loop == NULL, "clear _loop expected");
    // Sanity check that the RPO numbering is reflected in the _blocks array.
    // It doesn't have to be for the loop tree to be built, but if it is not,
    // then the blocks have been reordered since dom graph building...which
    // would call the RPO numbering into question.
    assert(block->_rpo == i, "unexpected reverse post order number");
  }
#endif

  int idct = 0;
  CFGLoop* root_loop = new CFGLoop(idct++);

  Block_List worklist;

  // Assign blocks to loops
  for(uint i = number_of_blocks() - 1; i > 0; i-- ) { // skip Root block
    Block* block = get_block(i);

    if (block->head()->is_Loop()) {
      Block* loop_head = block;
      assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
      Block* tail = get_block_for_node(tail_n);

      // Defensively filter out Loop nodes for non-single-entry loops.
      // For all reasonable loops, the head occurs before the tail in RPO.
      if (i <= tail->_rpo) {

        // The tail and (recursive) predecessors of the tail
        // are made members of a new loop.
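        // (Sketch of the flood below: push_pred() starts from the back-edge
        // predecessor and walks predecessor edges; a block is claimed for the
        // new loop only if its RPO number is greater than the head's, which
        // is what keeps the walk from escaping out of the loop entry.)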

        assert(worklist.size() == 0, "nonempty worklist");
        CFGLoop* nloop = new CFGLoop(idct++);
        assert(loop_head->_loop == NULL, "just checking");
        loop_head->_loop = nloop;
        // Add to nloop so push_pred() will skip over inner loops
        nloop->add_member(loop_head);
        nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this);

        while (worklist.size() > 0) {
          Block* member = worklist.pop();
          if (member != loop_head) {
            for (uint j = 1; j < member->num_preds(); j++) {
              nloop->push_pred(member, j, worklist, this);
            }
          }
        }
      }
    }
  }

  // Create a member list for each loop consisting
  // of both blocks and (immediate child) loops.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    CFGLoop* lp = block->_loop;
    if (lp == NULL) {
      // Not assigned to a loop. Add it to the method's pseudo loop.
      block->_loop = root_loop;
      lp = root_loop;
    }
    if (lp == root_loop || block != lp->head()) { // loop heads are already members
      lp->add_member(block);
    }
    if (lp != root_loop) {
      if (lp->parent() == NULL) {
        // Not a nested loop. Make it a child of the method's pseudo loop.
        root_loop->add_nested_loop(lp);
      }
      if (block == lp->head()) {
        // Add nested loop to member list of parent loop.
        lp->parent()->add_member(lp);
      }
    }
  }

  return root_loop;
}

//------------------------------push_pred--------------------------------------
void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) {
  Node* pred_n = blk->pred(i);
  Block* pred = cfg->get_block_for_node(pred_n);
  CFGLoop *pred_loop = pred->_loop;
  if (pred_loop == NULL) {
    // Filter out blocks for non-single-entry loops.
    // For all reasonable loops, the head occurs before the tail in RPO.
    if (pred->_rpo > head()->_rpo) {
      pred->_loop = this;
      worklist.push(pred);
    }
  } else if (pred_loop != this) {
    // Nested loop.
    while (pred_loop->_parent != NULL && pred_loop->_parent != this) {
      pred_loop = pred_loop->_parent;
    }
    // Make pred's loop be a child
    if (pred_loop->_parent == NULL) {
      add_nested_loop(pred_loop);
      // Continue with loop entry predecessor.
      Block* pred_head = pred_loop->head();
      assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      assert(pred_head != head(), "loop head in only one loop");
      push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
    } else {
      assert(pred_loop->_parent == this && _parent == NULL, "just checking");
    }
  }
}

//------------------------------add_nested_loop--------------------------------
// Make cl a child of the current loop in the loop tree.
void CFGLoop::add_nested_loop(CFGLoop* cl) {
  assert(_parent == NULL, "no parent yet");
  assert(cl != this, "not my own parent");
  cl->_parent = this;
  CFGLoop* ch = _child;
  if (ch == NULL) {
    _child = cl;
  } else {
    while (ch->_sibling != NULL) { ch = ch->_sibling; }
    ch->_sibling = cl;
  }
}

//------------------------------compute_loop_depth-----------------------------
// Store the loop depth in each CFGLoop object.
// Recursively walk the children to do the same for them.
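// The method's pseudo loop is passed depth 0 (see the compute_loop_depth(0)
// call in estimate_block_frequency() above), so a loop nested k levels deep
// ends up with _depth == k.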
void CFGLoop::compute_loop_depth(int depth) {
  _depth = depth;
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->compute_loop_depth(depth + 1);
    ch = ch->_sibling;
  }
}

//------------------------------compute_freq-----------------------------------
// Compute the frequency of each block and loop, relative to a single entry
// into the dominating loop head.
void CFGLoop::compute_freq() {
  // Bottom-up traversal of the loop tree (visit inner loops first).
  // Set loop head frequency to 1.0, then transitively
  // compute frequency for all successors in the loop,
  // as well as for each exit edge. Inner loops are
  // treated as single blocks with loop exit targets
  // as the successor blocks.

  // Nested loops first
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->compute_freq();
    ch = ch->_sibling;
  }
  assert (_members.length() > 0, "no empty loops");
  Block* hd = head();
  hd->_freq = 1.0;
  for (int i = 0; i < _members.length(); i++) {
    CFGElement* s = _members.at(i);
    double freq = s->_freq;
    if (s->is_block()) {
      Block* b = s->as_Block();
      for (uint j = 0; j < b->_num_succs; j++) {
        Block* sb = b->_succs[j];
        update_succ_freq(sb, freq * b->succ_prob(j));
      }
    } else {
      CFGLoop* lp = s->as_CFGLoop();
      assert(lp->_parent == this, "immediate child");
      for (int k = 0; k < lp->_exits.length(); k++) {
        Block* eb = lp->_exits.at(k).get_target();
        double prob = lp->_exits.at(k).get_prob();
        update_succ_freq(eb, freq * prob);
      }
    }
  }

  // For all loops other than the outer, "method" loop,
  // sum and normalize the exit probability. The "method" loop
  // should keep the initial exit probability of 1, so that
  // inner blocks do not get erroneously scaled.
  if (_depth != 0) {
    // Total the exit probabilities for this loop.
    double exits_sum = 0.0f;
    for (int i = 0; i < _exits.length(); i++) {
      exits_sum += _exits.at(i).get_prob();
    }

    // Normalize the exit probabilities. Until now, the
    // probabilities estimate the probability of exit per
    // single loop iteration; afterward, they estimate
    // the probability of exit per loop entry.
    for (int i = 0; i < _exits.length(); i++) {
      Block* et = _exits.at(i).get_target();
      float new_prob = 0.0f;
      if (_exits.at(i).get_prob() > 0.0f) {
        new_prob = _exits.at(i).get_prob() / exits_sum;
      }
      BlockProbPair bpp(et, new_prob);
      _exits.at_put(i, bpp);
    }

    // Save the total, but guard against an unreasonable probability,
    // as the value is used to estimate the loop trip count.
    // An infinite trip count would blur relative block
    // frequencies.
    if (exits_sum > 1.0f) exits_sum = 1.0;
    if (exits_sum < PROB_MIN) exits_sum = PROB_MIN;
    _exit_prob = exits_sum;
  }
}

//------------------------------succ_prob-------------------------------------
// Determine the probability of reaching successor 'i' from the receiver block.
float Block::succ_prob(uint i) {
  int eidx = end_idx();
  Node *n = get_node(eidx);  // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // Can only reach here if called after lcm. The original Op_If is gone,
      // so we attempt to infer the probability from one or both of the
      // successor blocks.
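      // Worked example (illustrative numbers only): with _freq == 1.0 and a
      // successor on path i whose only predecessor is this block and whose
      // _freq == 0.9, the inferred probability for edge i is 0.9 / 1.0 = 0.9,
      // leaving 0.1 for the other edge; this matches the cases below.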
      assert(_num_succs == 2, "expecting 2 successors of a null check");
      // If either successor has only one predecessor, then the
      // probability estimate can be derived using the
      // relative frequency of the successor and this block.
      if (_succs[i]->num_preds() == 2) {
        return _succs[i]->_freq / _freq;
      } else if (_succs[1-i]->num_preds() == 2) {
        return 1 - (_succs[1-i]->_freq / _freq);
      } else {
        // Estimate using both successor frequencies
        float freq = _succs[i]->_freq;
        return freq / (freq + _succs[1-i]->_freq);
      }
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If: {
    assert (i < 2, "just checking");
    // Conditionals pass on only part of their frequency
    float prob = n->as_MachIf()->_prob;
    assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
    // If succ[i] is the FALSE branch, invert path info
    if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) {
      return 1.0f - prob; // not taken
    } else {
      return prob; // taken
    }
  }

  case Op_Jump:
    // Divide the frequency between all successors evenly
    return 1.0f/_num_succs;

  case Op_Catch: {
    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
    if (ci->_con == CatchProjNode::fall_through_index) {
      // Fall-thru path gets the lion's share.
      return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
    } else {
      // Presume exceptional paths are equally unlikely
      return PROB_UNLIKELY_MAG(5);
    }
  }

  case Op_Root:
  case Op_Goto:
    // Pass frequency straight thru to target
    return 1.0f;

  case Op_NeverBranch:
    return 0.0f;

  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    // Do not push out freq to root block
    return 0.0f;

  default:
    ShouldNotReachHere();
  }

  return 0.0f;
}

//------------------------------num_fall_throughs-----------------------------
// Return the number of fall-through candidates for a block
int Block::num_fall_throughs() {
  int eidx = end_idx();
  Node *n = get_node(eidx);  // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // In theory either side can fall through; for simplicity's sake,
      // let's say only the false branch can now.
      return 1;
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If:
    return 2;

  case Op_Root:
  case Op_Goto:
    return 1;

  case Op_Catch: {
    for (uint i = 0; i < _num_succs; i++) {
      const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
      if (ci->_con == CatchProjNode::fall_through_index) {
        return 1;
      }
    }
    return 0;
  }

  case Op_Jump:
  case Op_NeverBranch:
  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    return 0;

  default:
    ShouldNotReachHere();
  }

  return 0;
}

//------------------------------succ_fall_through-----------------------------
// Return true if a specific successor could be a fall-through target.
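// The case analysis mirrors num_fall_throughs() above: two-way branches and
// gotos may fall through, a Catch only through its fall_through_index
// projection, and block-ending ops such as Return or Halt never do.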
bool Block::succ_fall_through(uint i) {
  int eidx = end_idx();
  Node *n = get_node(eidx);  // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // In theory either side can fall through; for simplicity's sake,
      // let's say only the false branch can now.
      return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If:
  case Op_Root:
  case Op_Goto:
    return true;

  case Op_Catch: {
    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
    return ci->_con == CatchProjNode::fall_through_index;
  }

  case Op_Jump:
  case Op_NeverBranch:
  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    return false;

  default:
    ShouldNotReachHere();
  }

  return false;
}

//------------------------------update_uncommon_branch------------------------
// Update the probability of a two-way branch to be uncommon
void Block::update_uncommon_branch(Block* ub) {
  int eidx = end_idx();
  Node *n = get_node(eidx);  // Get ending Node

  int op = n->as_Mach()->ideal_Opcode();

  assert(op == Op_CountedLoopEnd || op == Op_If, "must be an If");
  assert(num_fall_throughs() == 2, "must be a two-way branch block");

  // Which successor is ub?
  uint s;
  for (s = 0; s < _num_succs; s++) {
    if (_succs[s] == ub) break;
  }
  assert(s < 2, "uncommon successor must be found");

  // If ub is the true path, make the probability small; otherwise
  // ub is the false path, so make the probability large.
  bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);

  // Get existing probability
  float p = n->as_MachIf()->_prob;

  if (invert) p = 1.0 - p;
  if (p > PROB_MIN) {
    p = PROB_MIN;
  }
  if (invert) p = 1.0 - p;

  n->as_MachIf()->_prob = p;
}

//------------------------------update_succ_freq-------------------------------
// Update the appropriate frequency associated with block 'b', a successor of
// a block in this loop.
void CFGLoop::update_succ_freq(Block* b, double freq) {
  if (b->_loop == this) {
    if (b == head()) {
      // Back branch within the loop.
      // Do nothing now; the loop-carried frequency will be
      // adjusted later in scale_freq().
    } else {
      // Simple branch within the loop
      b->_freq += freq;
    }
  } else if (!in_loop_nest(b)) {
    // Branch is an exit from this loop
    BlockProbPair bpp(b, freq);
    _exits.append(bpp);
  } else {
    // Branch into nested loop
    CFGLoop* ch = b->_loop;
    ch->_freq += freq;
  }
}

//------------------------------in_loop_nest-----------------------------------
// Determine if block b is in the receiver's loop nest.
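// It walks b's _loop up the parent chain until its depth no longer exceeds
// the receiver's depth and then checks whether the chain arrived at the
// receiver; loops already at the same depth are accepted immediately.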
bool CFGLoop::in_loop_nest(Block* b) {
  int depth = _depth;
  CFGLoop* b_loop = b->_loop;
  int b_depth = b_loop->_depth;
  if (depth == b_depth) {
    return true;
  }
  while (b_depth > depth) {
    b_loop = b_loop->_parent;
    b_depth = b_loop->_depth;
  }
  return b_loop == this;
}

//------------------------------scale_freq-------------------------------------
// Scale frequency of loops and blocks by trip counts from outer loops.
// Do a top-down traversal of the loop tree (visit outer loops first).
void CFGLoop::scale_freq() {
  double loop_freq = _freq * trip_count();
  _freq = loop_freq;
  for (int i = 0; i < _members.length(); i++) {
    CFGElement* s = _members.at(i);
    double block_freq = s->_freq * loop_freq;
    if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
      block_freq = MIN_BLOCK_FREQUENCY;
    s->_freq = block_freq;
  }
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->scale_freq();
    ch = ch->_sibling;
  }
}

// Frequency of outer loop
double CFGLoop::outer_loop_freq() const {
  if (_child != NULL) {
    return _child->_freq;
  }
  return _freq;
}

#ifndef PRODUCT
//------------------------------dump_tree--------------------------------------
void CFGLoop::dump_tree() const {
  dump();
  if (_child != NULL) _child->dump_tree();
  if (_sibling != NULL) _sibling->dump_tree();
}

//------------------------------dump-------------------------------------------
void CFGLoop::dump() const {
  for (int i = 0; i < _depth; i++) tty->print(" ");
  tty->print("%s: %d trip_count: %6.0f freq: %6.0f\n",
             _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq);
  for (int i = 0; i < _depth; i++) tty->print(" ");
  tty->print(" members:");
  int k = 0;
  for (int i = 0; i < _members.length(); i++) {
    if (k++ >= 6) {
      tty->print("\n ");
      for (int j = 0; j < _depth+1; j++) tty->print(" ");
      k = 0;
    }
    CFGElement *s = _members.at(i);
    if (s->is_block()) {
      Block *b = s->as_Block();
      tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq);
    } else {
      CFGLoop* lp = s->as_CFGLoop();
      tty->print(" L%d(%6.3f)", lp->_id, lp->_freq);
    }
  }
  tty->print("\n");
  for (int i = 0; i < _depth; i++) tty->print(" ");
  tty->print(" exits: ");
  k = 0;
  for (int i = 0; i < _exits.length(); i++) {
    if (k++ >= 7) {
      tty->print("\n ");
      for (int j = 0; j < _depth+1; j++) tty->print(" ");
      k = 0;
    }
    Block *blk = _exits.at(i).get_target();
    double prob = _exits.at(i).get_prob();
    tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
  }
  tty->print("\n");
}
#endif