/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/machnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/chaitin.hpp"
#include "runtime/deoptimization.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

// To avoid float value underflow
#define MIN_BLOCK_FREQUENCY 1.e-35f

//----------------------------schedule_node_into_block-------------------------
// Insert node n into block b. Look for projections of n and make sure they
// are in b also.
void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
  // Set basic block of n, Add n to b,
  map_node_to_block(n, b);
  b->add_inst(n);

  // After Matching, nearly any old Node may have projections trailing it.
  // These are usually machine-dependent flags. In any case, they might
  // float to another block below this one. Move them up.
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* use = n->fast_out(i);
    if (use->is_Proj()) {
      Block* buse = get_block_for_node(use);
      if (buse != b) {              // In wrong block?
        if (buse != NULL) {
          buse->find_remove(use);   // Remove from wrong block
        }
        map_node_to_block(use, b);
        b->add_inst(use);
      }
    }
  }
}

//----------------------------replace_block_proj_ctrl-------------------------
// Nodes that have is_block_proj() nodes as their control need to use
// the appropriate Region for their actual block as their control since
// the projection will be in a predecessor block.
void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
  const Node *in0 = n->in(0);
  assert(in0 != NULL, "Only control-dependent");
  const Node *p = in0->is_block_proj();
  if (p != NULL && p != n) {    // Control from a block projection?
    assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
    // Find trailing Region
    Block *pb = get_block_for_node(in0); // Block-projection already has basic block
    uint j = 0;
    if (pb->_num_succs != 1) {  // More than 1 successor?
      // Search for successor
      uint max = pb->number_of_nodes();
      assert( max > 1, "" );
      uint start = max - pb->_num_succs;
      // Find which output path belongs to projection
      for (j = start; j < max; j++) {
        if( pb->get_node(j) == in0 )
          break;
      }
      assert( j < max, "must find" );
      // Change control to match head of successor basic block
      j -= start;
    }
    n->set_req(0, pb->_succs[j]->head());
  }
}

bool PhaseCFG::is_dominator(Node* dom_node, Node* node) {
  if (dom_node == node) {
    return true;
  }
  Block* d = get_block_for_node(dom_node);
  Block* n = get_block_for_node(node);
  if (d == n) {
    if (dom_node->is_block_start()) {
      return true;
    }
    if (node->is_block_start()) {
      return false;
    }
    if (dom_node->is_block_proj()) {
      return false;
    }
    if (node->is_block_proj()) {
      return true;
    }
#ifdef ASSERT
    node->dump();
    dom_node->dump();
#endif
    fatal("unhandled");
    return false;
  }
  return d->dom_lca(n) == d;
}

//------------------------------schedule_pinned_nodes--------------------------
// Set the basic block for Nodes pinned into blocks
void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
  // Allocate node stack of size C->live_nodes()+8 to avoid frequent realloc
  GrowableArray <Node *> spstack(C->live_nodes() + 8);
  spstack.push(_root);
  while (spstack.is_nonempty()) {
    Node* node = spstack.pop();
    if (!visited.test_set(node->_idx)) { // Test node and flag it as visited
      if (node->pinned() && !has_block(node)) {  // Pinned?  Nail it down!
        assert(node->in(0), "pinned Node must have Control");
        // Before setting block replace block_proj control edge
        replace_block_proj_ctrl(node);
        Node* input = node->in(0);
        while (!input->is_block_start()) {
          input = input->in(0);
        }
        Block* block = get_block_for_node(input); // Basic block of controlling input
        schedule_node_into_block(node, block);
      }

      // If the node has precedence edges (added when CastPP nodes are
      // removed in final_graph_reshaping), fix the control of the
      // node to cover the precedence edges and remove the
      // dependencies.
      Node* n = NULL;
      for (uint i = node->len()-1; i >= node->req(); i--) {
        Node* m = node->in(i);
        if (m == NULL) continue;
        // Skip the precedence edge if the test that guarded a CastPP:
        // - was optimized out during escape analysis
        // (OptimizePtrCompare): the CastPP's control isn't an end of
        // block.
        // - is moved in the branch of a dominating If: the control of
        // the CastPP is then a Region.
        if (m->is_block_proj() || m->is_block_start()) {
          node->rm_prec(i);
          if (n == NULL) {
            n = m;
          } else {
            assert(is_dominator(n, m) || is_dominator(m, n), "one must dominate the other");
            n = is_dominator(n, m) ? m : n;
          }
        }
      }
      if (n != NULL) {
        assert(node->in(0), "control should have been set");
        assert(is_dominator(n, node->in(0)) || is_dominator(node->in(0), n), "one must dominate the other");
        if (!is_dominator(n, node->in(0))) {
          node->set_req(0, n);
        }
      }

      // process all inputs that are non NULL
      for (int i = node->req() - 1; i >= 0; --i) {
        if (node->in(i) != NULL) {
          spstack.push(node->in(i));
        }
      }
    }
  }
}

#ifdef ASSERT
// Assert that new input b2 is dominated by all previous inputs.
// Check this by seeing that it is dominated by b1, the deepest
// input observed until b2.
static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
  if (b1 == NULL) return;
  assert(b1->_dom_depth < b2->_dom_depth, "sanity");
  Block* tmp = b2;
  while (tmp != b1 && tmp != NULL) {
    tmp = tmp->_idom;
  }
  if (tmp != b1) {
    // Detected an unschedulable graph. Print some nice stuff and die.
    tty->print_cr("!!! Unschedulable graph !!!");
    for (uint j=0; j<n->len(); j++) { // For all inputs
      Node* inn = n->in(j); // Get input
      if (inn == NULL) continue; // Ignore NULL, missing inputs
      Block* inb = cfg->get_block_for_node(inn);
      tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
                 inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
      inn->dump();
    }
    tty->print("Failing node: ");
    n->dump();
    assert(false, "unschedulable graph");
  }
}
#endif

static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
  // Find the last input dominated by all other inputs.
  Block* deepb = NULL;          // Deepest block so far
  int deepb_dom_depth = 0;
  for (uint k = 0; k < n->len(); k++) { // For all inputs
    Node* inn = n->in(k);       // Get input
    if (inn == NULL) continue;  // Ignore NULL, missing inputs
    Block* inb = cfg->get_block_for_node(inn);
    assert(inb != NULL, "must already have scheduled this input");
    if (deepb_dom_depth < (int) inb->_dom_depth) {
      // The new inb must be dominated by the previous deepb.
      // The various inputs must be linearly ordered in the dom
      // tree, or else there will not be a unique deepest block.
      DEBUG_ONLY(assert_dom(deepb, inb, n, cfg));
      deepb = inb;              // Save deepest block
      deepb_dom_depth = deepb->_dom_depth;
    }
  }
  assert(deepb != NULL, "must be at least one input to n");
  return deepb;
}


//------------------------------schedule_early---------------------------------
// Find the earliest Block any instruction can be placed in. Some instructions
// are pinned into Blocks. Unpinned instructions can appear in last block in
// which all their inputs occur.
bool PhaseCFG::schedule_early(VectorSet &visited, Node_Stack &roots) {
  // Allocate stack with enough space to avoid frequent realloc
  Node_Stack nstack(roots.size() + 8);
  // _root will be processed among C->top() inputs
  roots.push(C->top(), 0);
  visited.set(C->top()->_idx);

  while (roots.size() != 0) {
    // Use local variables nstack_top_n & nstack_top_i to cache values
    // on stack's top.
    Node* parent_node = roots.node();
    uint input_index = 0;
    roots.pop();

    while (true) {
      if (input_index == 0) {
        // Fixup some control. Constants without control get attached
        // to root and nodes that use is_block_proj() nodes should be attached
        // to the region that starts their block.
        const Node* control_input = parent_node->in(0);
        if (control_input != NULL) {
          replace_block_proj_ctrl(parent_node);
        } else {
          // Is a constant with NO inputs?
          if (parent_node->req() == 1) {
            parent_node->set_req(0, _root);
          }
        }
      }

      // First, visit all inputs and force them to get a block. If an
      // input is already in a block we quit following inputs (to avoid
      // cycles). Instead we put that Node on a worklist to be handled
      // later (since ITS inputs may not have a block yet).

      // Assume all n's inputs will be processed
      bool done = true;

      while (input_index < parent_node->len()) {
        Node* in = parent_node->in(input_index++);
        if (in == NULL) {
          continue;
        }

        int is_visited = visited.test_set(in->_idx);
        if (!has_block(in)) {
          if (is_visited) {
            return false;
          }
          // Save parent node and next input's index.
          nstack.push(parent_node, input_index);
          // Process current input now.
          parent_node = in;
          input_index = 0;
          // Not all n's inputs processed.
          done = false;
          break;
        } else if (!is_visited) {
          // Visit this guy later, using worklist
          roots.push(in, 0);
        }
      }

      if (done) {
        // All of n's inputs have been processed, complete post-processing.

        // Some instructions are pinned into a block. These include Region,
        // Phi, Start, Return, and other control-dependent instructions and
        // any projections which depend on them.
        if (!parent_node->pinned()) {
          // Set earliest legal block.
          Block* earliest_block = find_deepest_input(parent_node, this);
          map_node_to_block(parent_node, earliest_block);
        } else {
          assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge");
        }

        if (nstack.is_empty()) {
          // Finished all nodes on stack.
          // Process next node on the worklist 'roots'.
          break;
        }
        // Get saved parent node and next input's index.
        parent_node = nstack.node();
        input_index = nstack.index();
        nstack.pop();
      }
    }
  }
  return true;
}

//------------------------------dom_lca----------------------------------------
// Find least common ancestor in dominator tree
// LCA is a current notion of LCA, to be raised above 'this'.
// As a convenient boundary condition, return 'this' if LCA is NULL.
// Find the LCA of those two nodes.
Block* Block::dom_lca(Block* LCA) {
  if (LCA == NULL || LCA == this) return this;

  Block* anc = this;
  while (anc->_dom_depth > LCA->_dom_depth)
    anc = anc->_idom;           // Walk up till anc is as high as LCA

  while (LCA->_dom_depth > anc->_dom_depth)
    LCA = LCA->_idom;           // Walk up till LCA is as high as anc

  while (LCA != anc) {          // Walk both up till they are the same
    LCA = LCA->_idom;
    anc = anc->_idom;
  }

  return LCA;
}

//--------------------------raise_LCA_above_use--------------------------------
// We are placing a definition, and have been given a def->use edge.
// The definition must dominate the use, so move the LCA upward in the
// dominator tree to dominate the use. If the use is a phi, adjust
// the LCA only with the phi input paths which actually use this def.
static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
  Block* buse = cfg->get_block_for_node(use);
  if (buse == NULL) return LCA;   // Unused killing Projs have no use block
  if (!use->is_Phi()) return buse->dom_lca(LCA);
  uint pmax = use->req();       // Number of Phi inputs
  // Why does not this loop just break after finding the matching input to
  // the Phi? Well...it's like this. I do not have true def-use/use-def
  // chains. Means I cannot distinguish, from the def-use direction, which
  // of many use-defs lead from the same use to the same def. That is, this
  // Phi might have several uses of the same def. Each use appears in a
  // different predecessor block.
  // But when I enter here, I cannot distinguish
  // which use-def edge I should find the predecessor block for. So I find
  // them all. Means I do a little extra work if a Phi uses the same value
  // more than once.
  for (uint j=1; j<pmax; j++) { // For all inputs
    if (use->in(j) == def) {    // Found matching input?
      Block* pred = cfg->get_block_for_node(buse->pred(j));
      LCA = pred->dom_lca(LCA);
    }
  }
  return LCA;
}

//----------------------------raise_LCA_above_marks----------------------------
// Return a new LCA that dominates LCA and any of its marked predecessors.
// Search all my parents up to 'early' (exclusive), looking for predecessors
// which are marked with the given index. Return the LCA (in the dom tree)
// of all marked blocks. If there are none marked, return the original
// LCA.
static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
  Block_List worklist;
  worklist.push(LCA);
  while (worklist.size() > 0) {
    Block* mid = worklist.pop();
    if (mid == early) continue;  // stop searching here

    // Test and set the visited bit.
    if (mid->raise_LCA_visited() == mark) continue;  // already visited

    // Don't process the current LCA, otherwise the search may terminate early
    if (mid != LCA && mid->raise_LCA_mark() == mark) {
      // Raise the LCA.
      LCA = mid->dom_lca(LCA);
      if (LCA == early) break;   // stop searching everywhere
      assert(early->dominates(LCA), "early is high enough");
      // Resume searching at that point, skipping intermediate levels.
      worklist.push(LCA);
      if (LCA == mid)
        continue; // Don't mark as visited to avoid early termination.
    } else {
      // Keep searching through this block's predecessors.
      for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
        Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
        worklist.push(mid_parent);
      }
    }
    mid->set_raise_LCA_visited(mark);
  }
  return LCA;
}

//--------------------------memory_early_block--------------------------------
// This is a variation of find_deepest_input, the heart of schedule_early.
// Find the "early" block for a load, if we considered only memory and
// address inputs, that is, if other data inputs were ignored.
//
// Because a subset of edges are considered, the resulting block will
// be earlier (at a shallower dom_depth) than the true schedule_early
// point of the node. We compute this earlier block as a more permissive
// site for anti-dependency insertion, but only if subsume_loads is enabled.
static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
  Node* base;
  Node* index;
  Node* store = load->in(MemNode::Memory);
  load->as_Mach()->memory_inputs(base, index);

  assert(base != NodeSentinel && index != NodeSentinel,
         "unexpected base/index inputs");

  Node* mem_inputs[4];
  int mem_inputs_length = 0;
  if (base != NULL)  mem_inputs[mem_inputs_length++] = base;
  if (index != NULL) mem_inputs[mem_inputs_length++] = index;
  if (store != NULL) mem_inputs[mem_inputs_length++] = store;

  // In the comparison below, add one to account for the control input,
  // which may be null, but always takes up a spot in the in array.
  if (mem_inputs_length + 1 < (int) load->req()) {
    // This "load" has more inputs than just the memory, base and index inputs.
    // For purposes of checking anti-dependences, we need to start
    // from the early block of only the address portion of the instruction,
    // and ignore other blocks that may have factored into the wider
    // schedule_early calculation.
    if (load->in(0) != NULL) mem_inputs[mem_inputs_length++] = load->in(0);

    Block* deepb = NULL;        // Deepest block so far
    int deepb_dom_depth = 0;
    for (int i = 0; i < mem_inputs_length; i++) {
      Block* inb = cfg->get_block_for_node(mem_inputs[i]);
      if (deepb_dom_depth < (int) inb->_dom_depth) {
        // The new inb must be dominated by the previous deepb.
        // The various inputs must be linearly ordered in the dom
        // tree, or else there will not be a unique deepest block.
        DEBUG_ONLY(assert_dom(deepb, inb, load, cfg));
        deepb = inb;            // Save deepest block
        deepb_dom_depth = deepb->_dom_depth;
      }
    }
    early = deepb;
  }

  return early;
}

//--------------------------insert_anti_dependences---------------------------
// A load may need to witness memory that nearby stores can overwrite.
// For each nearby store, either insert an "anti-dependence" edge
// from the load to the store, or else move LCA upward to force the
// load to (eventually) be scheduled in a block above the store.
//
// Do not add edges to stores on distinct control-flow paths;
// only add edges to stores which might interfere.
//
// Return the (updated) LCA. There will not be any possibly interfering
// store between the load's "early block" and the updated LCA.
// Any stores in the updated LCA will have new precedence edges
// back to the load. The caller is expected to schedule the load
// in the LCA, in which case the precedence edges will make LCM
// preserve anti-dependences. The caller may also hoist the load
// above the LCA, if it is not the early block.
Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
  assert(load->needs_anti_dependence_check(), "must be a load of some sort");
  assert(LCA != NULL, "");
  DEBUG_ONLY(Block* LCA_orig = LCA);

  // Compute the alias index. Loads and stores with different alias indices
  // do not need anti-dependence edges.
  int load_alias_idx = C->get_alias_index(load->adr_type());
#ifdef ASSERT
  if (load_alias_idx == Compile::AliasIdxBot && C->AliasLevel() > 0 &&
      (PrintOpto || VerifyAliases ||
       PrintMiscellaneous && (WizardMode || Verbose))) {
    // Load nodes should not consume all of memory.
    // Reporting a bottom type indicates a bug in adlc.
    // If some particular type of node validly consumes all of memory,
    // sharpen the preceding "if" to exclude it, so we can catch bugs here.
    tty->print_cr("*** Possible Anti-Dependence Bug: Load consumes all of memory.");
    load->dump(2);
    if (VerifyAliases) assert(load_alias_idx != Compile::AliasIdxBot, "");
  }
#endif
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrComp),
         "String compare is only known 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrEquals),
         "String equals is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrIndexOf),
         "String indexOf is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrIndexOfChar),
         "String indexOfChar is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_AryEq),
         "Arrays equals is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_HasNegatives),
         "HasNegatives is a 'load' that does not conflict with any stores");

  if (!C->alias_type(load_alias_idx)->is_rewritable()) {
    // It is impossible to spoil this load by putting stores before it,
    // because we know that the stores will never update the value
    // which 'load' must witness.
    return LCA;
  }

  node_idx_t load_index = load->_idx;

  // Note the earliest legal placement of 'load', as determined
  // by the unique point in the dom tree where all memory effects
  // and other inputs are first available. (Computed by schedule_early.)
  // For normal loads, 'early' is the shallowest place (dom graph wise)
  // to look for anti-deps between this load and any store.
  Block* early = get_block_for_node(load);

  // If we are subsuming loads, compute an "early" block that only considers
  // memory or address inputs. This block may be different than the
  // schedule_early block in that it could be at an even shallower depth in the
  // dominator tree, and allow for a broader discovery of anti-dependences.
  if (C->subsume_loads()) {
    early = memory_early_block(load, early, this);
  }

  ResourceArea *area = Thread::current()->resource_area();
  Node_List worklist_mem(area);     // prior memory state to store
  Node_List worklist_store(area);   // possible-def to explore
  Node_List worklist_visited(area); // visited mergemem nodes
  Node_List non_early_stores(area); // all relevant stores outside of early
  bool must_raise_LCA = false;

#ifdef TRACK_PHI_INPUTS
  // %%% This extra checking fails because MergeMem nodes are not GVNed.
  // Provide "phi_inputs" to check if every input to a PhiNode is from the
  // original memory state. This indicates a PhiNode which should not
  // prevent the load from sinking. For such a block, set_raise_LCA_mark
  // may be overly conservative.
  // Mechanism: count inputs seen for each Phi encountered in worklist_store.
  DEBUG_ONLY(GrowableArray<uint> phi_inputs(area, C->unique(),0,0));
#endif

  // 'load' uses some memory state; look for users of the same state.
  // Recurse through MergeMem nodes to the stores that use them.

  // Each of these stores is a possible definition of memory
  // that 'load' needs to use. We need to force 'load'
  // to occur before each such store.
  // When the store is in
  // the same block as 'load', we insert an anti-dependence
  // edge load->store.

  // The relevant stores "nearby" the load consist of a tree rooted
  // at initial_mem, with internal nodes of type MergeMem.
  // Therefore, the branches visited by the worklist are of this form:
  //    initial_mem -> (MergeMem ->)* store
  // The anti-dependence constraints apply only to the fringe of this tree.

  Node* initial_mem = load->in(MemNode::Memory);
  worklist_store.push(initial_mem);
  worklist_visited.push(initial_mem);
  worklist_mem.push(NULL);
  while (worklist_store.size() > 0) {
    // Examine a nearby store to see if it might interfere with our load.
    Node* mem   = worklist_mem.pop();
    Node* store = worklist_store.pop();
    uint op = store->Opcode();

    // MergeMems do not directly have anti-deps.
    // Treat them as internal nodes in a forward tree of memory states,
    // the leaves of which are each a 'possible-def'.
    if (store == initial_mem    // root (exclusive) of tree we are searching
        || op == Op_MergeMem    // internal node of tree we are searching
        ) {
      mem = store;   // It's not a possibly interfering store.
      if (store == initial_mem)
        initial_mem = NULL;  // only process initial memory once

      for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
        store = mem->fast_out(i);
        if (store->is_MergeMem()) {
          // Be sure we don't get into combinatorial problems.
          // (Allow phis to be repeated; they can merge two relevant states.)
          uint j = worklist_visited.size();
          for (; j > 0; j--) {
            if (worklist_visited.at(j-1) == store) break;
          }
          if (j > 0) continue; // already on work list; do not repeat
          worklist_visited.push(store);
        }
        worklist_mem.push(mem);
        worklist_store.push(store);
      }
      continue;
    }

    if (op == Op_MachProj || op == Op_Catch) continue;
    if (store->needs_anti_dependence_check()) continue;  // not really a store

    // Compute the alias index. Loads and stores with different alias
    // indices do not need anti-dependence edges. Wide MemBar's are
    // anti-dependent on everything (except immutable memories).
    const TypePtr* adr_type = store->adr_type();
    if (!C->can_alias(adr_type, load_alias_idx)) continue;

    // Most slow-path runtime calls do NOT modify Java memory, but
    // they can block and so write Raw memory.
    if (store->is_Mach()) {
      MachNode* mstore = store->as_Mach();
      if (load_alias_idx != Compile::AliasIdxRaw) {
        // Check for call into the runtime using the Java calling
        // convention (and from there into a wrapper); it has no
        // _method. Can't do this optimization for Native calls because
        // they CAN write to Java memory.
        if (mstore->ideal_Opcode() == Op_CallStaticJava) {
          assert(mstore->is_MachSafePoint(), "");
          MachSafePointNode* ms = (MachSafePointNode*) mstore;
          assert(ms->is_MachCallJava(), "");
          MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
          if (mcj->_method == NULL) {
            // These runtime calls do not write to Java visible memory
            // (other than Raw) and so do not require anti-dependence edges.
            continue;
          }
        }
        // Same for SafePoints: they read/write Raw but only read otherwise.
        // This is basically a workaround for SafePoints only defining control
        // instead of control + memory.
        if (mstore->ideal_Opcode() == Op_SafePoint)
          continue;
      } else {
        // Some raw memory, such as the load of "top" at an allocation,
        // can be control dependent on the previous safepoint. See
        // comments in GraphKit::allocate_heap() about control input.
        // Inserting an anti-dep between such a safepoint and a use
        // creates a cycle, and will cause a subsequent failure in
        // local scheduling. (BugId 4919904)
        // (%%% How can a control input be a safepoint and not a projection??)
        if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore)
          continue;
      }
    }

    // Identify a block that the current load must be above,
    // or else observe that 'store' is all the way up in the
    // earliest legal block for 'load'. In the latter case,
    // immediately insert an anti-dependence edge.
    Block* store_block = get_block_for_node(store);
    assert(store_block != NULL, "unused killing projections skipped above");

    if (store->is_Phi()) {
      // 'load' uses memory which is one (or more) of the Phi's inputs.
      // It must be scheduled not before the Phi, but rather before
      // each of the relevant Phi inputs.
      //
      // Instead of finding the LCA of all inputs to a Phi that match 'mem',
      // we mark each corresponding predecessor block and do a combined
      // hoisting operation later (raise_LCA_above_marks).
      //
      // Do not assert(store_block != early, "Phi merging memory after access")
      // PhiNode may be at start of block 'early' with backedge to 'early'
      DEBUG_ONLY(bool found_match = false);
      for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
        if (store->in(j) == mem) {   // Found matching input?
          DEBUG_ONLY(found_match = true);
          Block* pred_block = get_block_for_node(store_block->pred(j));
          if (pred_block != early) {
            // If any predecessor of the Phi matches the load's "early block",
            // we do not need a precedence edge between the Phi and 'load'
            // since the load will be forced into a block preceding the Phi.
            pred_block->set_raise_LCA_mark(load_index);
            assert(!LCA_orig->dominates(pred_block) ||
                   early->dominates(pred_block), "early is high enough");
            must_raise_LCA = true;
          } else {
            // anti-dependent upon PHI pinned below 'early', no edge needed
            LCA = early;             // but can not schedule below 'early'
          }
        }
      }
      assert(found_match, "no worklist bug");
#ifdef TRACK_PHI_INPUTS
#ifdef ASSERT
      // This assert asks about correct handling of PhiNodes, which may not
      // have all input edges directly from 'mem'. See BugId 4621264
      int num_mem_inputs = phi_inputs.at_grow(store->_idx,0) + 1;
      // Increment by exactly one even if there are multiple copies of 'mem'
      // coming into the phi, because we will run this block several times
      // if there are several copies of 'mem'. (That's how DU iterators work.)
      phi_inputs.at_put(store->_idx, num_mem_inputs);
      assert(PhiNode::Input + num_mem_inputs < store->req(),
             "Expect at least one phi input will not be from original memory state");
#endif //ASSERT
#endif //TRACK_PHI_INPUTS
    } else if (store_block != early) {
      // 'store' is between the current LCA and earliest possible block.
      // Label its block, and decide later on how to raise the LCA
      // to include the effect on LCA of this store.
      // If this store's block gets chosen as the raised LCA, we
      // will find him on the non_early_stores list and stick him
      // with a precedence edge.
      // (But, don't bother if LCA is already raised all the way.)
      if (LCA != early) {
        store_block->set_raise_LCA_mark(load_index);
        must_raise_LCA = true;
        non_early_stores.push(store);
      }
    } else {
      // Found a possibly-interfering store in the load's 'early' block.
      // This means 'load' cannot sink at all in the dominator tree.
      // Add an anti-dep edge, and squeeze 'load' into the highest block.
      assert(store != load->in(0), "dependence cycle found");
      if (verify) {
        assert(store->find_edge(load) != -1, "missing precedence edge");
      } else {
        store->add_prec(load);
      }
      LCA = early;
      // This turns off the process of gathering non_early_stores.
    }
  }
  // (Worklist is now empty; all nearby stores have been visited.)

  // Finished if 'load' must be scheduled in its 'early' block.
  // If we found any stores there, they have already been given
  // precedence edges.
  if (LCA == early) return LCA;

  // We get here only if there are no possibly-interfering stores
  // in the load's 'early' block. Move LCA up above all predecessors
  // which contain stores we have noted.
  //
  // The raised LCA block can be a home to such interfering stores,
  // but its predecessors must not contain any such stores.
  //
  // The raised LCA will be a lower bound for placing the load,
  // preventing the load from sinking past any block containing
  // a store that may invalidate the memory state required by 'load'.
  if (must_raise_LCA)
    LCA = raise_LCA_above_marks(LCA, load->_idx, early, this);
  if (LCA == early) return LCA;

  // Insert anti-dependence edges from 'load' to each store
  // in the non-early LCA block.
  // Mine the non_early_stores list for such stores.
  if (LCA->raise_LCA_mark() == load_index) {
    while (non_early_stores.size() > 0) {
      Node* store = non_early_stores.pop();
      Block* store_block = get_block_for_node(store);
      if (store_block == LCA) {
        // add anti_dependence from store to load in its own block
        assert(store != load->in(0), "dependence cycle found");
        if (verify) {
          assert(store->find_edge(load) != -1, "missing precedence edge");
        } else {
          store->add_prec(load);
        }
      } else {
        assert(store_block->raise_LCA_mark() == load_index, "block was marked");
        // Any other stores we found must be either inside the new LCA
        // or else outside the original LCA. In the latter case, they
        // did not interfere with any use of 'load'.
        assert(LCA->dominates(store_block)
               || !LCA_orig->dominates(store_block), "no stray stores");
      }
    }
  }

  // Return the highest block containing stores; any stores
  // within that block have been given anti-dependence edges.
  return LCA;
}

// This class is used to iterate backwards over the nodes in the graph.
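// The walk is a post-order traversal of the def-use edges kept on an explicit
// Node_Stack: each node's not-anti-dependent users are visited first, then its
// anti-dependent users, and the low bit of the stacked node pointer records
// which of the two passes is in progress (see next() below).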

class Node_Backward_Iterator {

private:
  Node_Backward_Iterator();

public:
  // Constructor for the iterator
  Node_Backward_Iterator(Node *root, VectorSet &visited, Node_Stack &stack, PhaseCFG &cfg);

  // Postincrement operator to iterate over the nodes
  Node *next();

private:
  VectorSet  &_visited;
  Node_Stack &_stack;
  PhaseCFG   &_cfg;
};

// Constructor for the Node_Backward_Iterator
Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_Stack &stack, PhaseCFG &cfg)
  : _visited(visited), _stack(stack), _cfg(cfg) {
  // The stack should contain exactly the root
  stack.clear();
  stack.push(root, root->outcnt());

  // Clear the visited bits
  visited.Clear();
}

// Iterator for the Node_Backward_Iterator
Node *Node_Backward_Iterator::next() {

  // If the _stack is empty, then just return NULL: finished.
  if ( !_stack.size() )
    return NULL;

  // I visit unvisited not-anti-dependence users first, then anti-dependent
  // children next. I iterate backwards to support removal of nodes.
  // The stack holds states consisting of 3 values:
  // current Def node, flag which indicates 1st/2nd pass, index of current out edge
  Node *self = (Node*)(((uintptr_t)_stack.node()) & ~1);
  bool iterate_anti_dep = (((uintptr_t)_stack.node()) & 1);
  uint idx = MIN2(_stack.index(), self->outcnt()); // Support removal of nodes.
  _stack.pop();

  // I cycle here when I am entering a deeper level of recursion.
  // The key variable 'self' was set prior to jumping here.
  while( 1 ) {

    _visited.set(self->_idx);

    // Now schedule all uses as late as possible.
    const Node* src = self->is_Proj() ? self->in(0) : self;
    uint src_rpo = _cfg.get_block_for_node(src)->_rpo;

    // Schedule all nodes in a post-order visit
    Node *unvisited = NULL;  // Unvisited anti-dependent Node, if any

    // Scan for unvisited nodes
    while (idx > 0) {
      // For all uses, schedule late
      Node* n = self->raw_out(--idx); // Use

      // Skip already visited children
      if ( _visited.test(n->_idx) )
        continue;

      // do not traverse backward control edges
      Node *use = n->is_Proj() ? n->in(0) : n;
      uint use_rpo = _cfg.get_block_for_node(use)->_rpo;

      if ( use_rpo < src_rpo )
        continue;

      // Phi nodes always precede uses in a basic block
      if ( use_rpo == src_rpo && use->is_Phi() )
        continue;

      unvisited = n;      // Found unvisited

      // Check for possible-anti-dependent
      // 1st pass: No such nodes, 2nd pass: Only such nodes.
      if (n->needs_anti_dependence_check() == iterate_anti_dep) {
        unvisited = n;      // Found unvisited
        break;
      }
    }

    // Did I find an unvisited not-anti-dependent Node?
    if (!unvisited) {
      if (!iterate_anti_dep) {
        // 2nd pass: Iterate over nodes which needs_anti_dependence_check.
        iterate_anti_dep = true;
        idx = self->outcnt();
        continue;
      }
      break;  // All done with children; post-visit 'self'
    }

    // Visit the unvisited Node. Contains the obvious push to
    // indicate I'm entering a deeper level of recursion. I push the
    // old state onto the _stack and set a new state and loop (recurse).
    _stack.push((Node*)((uintptr_t)self | (uintptr_t)iterate_anti_dep), idx);
    self = unvisited;
    iterate_anti_dep = false;
    idx = self->outcnt();
  } // End recursion loop

  return self;
}

//------------------------------ComputeLatenciesBackwards----------------------
// Compute the latency of all the instructions.
void PhaseCFG::compute_latencies_backwards(VectorSet &visited, Node_Stack &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- ComputeLatenciesBackwards ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
  Node *n;

  // Walk over all the nodes from last to first
  while (n = iter.next()) {
    // Set the latency for the definitions of this instruction
    partial_latency_of_defs(n);
  }
} // end ComputeLatenciesBackwards

//------------------------------partial_latency_of_defs------------------------
// Compute the latency impact of this node on all defs. This computes
// a number that increases as we approach the beginning of the routine.
void PhaseCFG::partial_latency_of_defs(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_to_inputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
    dump();
  }
#endif

  if (n->is_Proj()) {
    n = n->in(0);
  }

  if (n->is_Root()) {
    return;
  }

  uint nlen = n->len();
  uint use_latency = get_latency_for_node(n);
  uint use_pre_order = get_block_for_node(n)->_pre_order;

  for (uint j = 0; j < nlen; j++) {
    Node *def = n->in(j);

    if (!def || def == n) {
      continue;
    }

    // Walk backwards thru projections
    if (def->is_Proj()) {
      def = def->in(0);
    }

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("# in(%2d): ", j);
      def->dump();
    }
#endif

    // If the defining block is not known, assume it is ok
    Block *def_block = get_block_for_node(def);
    uint def_pre_order = def_block ? def_block->_pre_order : 0;

    if ((use_pre_order < def_pre_order) || (use_pre_order == def_pre_order && n->is_Phi())) {
      continue;
    }

    uint delta_latency = n->latency(j);
    uint current_latency = delta_latency + use_latency;

    if (get_latency_for_node(def) < current_latency) {
      set_latency_for_node(def, current_latency);
    }

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", use_latency, j, delta_latency, current_latency, def->_idx, get_latency_for_node(def));
    }
#endif
  }
}

//------------------------------latency_from_use-------------------------------
// Compute the latency of a specific use
int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
  // If self-reference, return no latency
  if (use == n || use->is_Root()) {
    return 0;
  }

  uint def_pre_order = get_block_for_node(def)->_pre_order;
  uint latency = 0;

  // If the use is not a projection, then it is simple...
  if (!use->is_Proj()) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("# out(): ");
      use->dump();
    }
#endif

    uint use_pre_order = get_block_for_node(use)->_pre_order;

    if (use_pre_order < def_pre_order)
      return 0;

    if (use_pre_order == def_pre_order && use->is_Phi())
      return 0;

    uint nlen = use->len();
    uint nl = get_latency_for_node(use);

    for ( uint j=0; j<nlen; j++ ) {
      if (use->in(j) == n) {
        // Change this if we want local latencies
        uint ul = use->latency(j);
        uint l = ul + nl;
        if (latency < l) latency = l;
#ifndef PRODUCT
        if (trace_opto_pipelining()) {
          tty->print_cr("# %d + edge_latency(%d) == %d -> %d, latency = %d",
                        nl, j, ul, l, latency);
        }
#endif
      }
    }
  } else {
    // This is a projection, just grab the latency of the use(s)
    for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
      uint l = latency_from_use(use, def, use->fast_out(j));
      if (latency < l) latency = l;
    }
  }

  return latency;
}

//------------------------------latency_from_uses------------------------------
// Compute the latency of this instruction relative to all of its uses.
// This computes a number that increases as we approach the beginning of the
// routine.
void PhaseCFG::latency_from_uses(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_from_outputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
    dump();
  }
#endif
  uint latency=0;
  const Node *def = n->is_Proj() ? n->in(0): n;

  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    uint l = latency_from_use(n, def, n->fast_out(i));

    if (latency < l) latency = l;
  }

  set_latency_for_node(n, latency);
}

//------------------------------hoist_to_cheaper_block-------------------------
// Pick a block for node self, between early and LCA, that is a cheaper
// alternative to LCA.
Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
  const double delta = 1+PROB_UNLIKELY_MAG(4);
  Block* least       = LCA;
  double least_freq  = least->_freq;
  uint target        = get_latency_for_node(self);
  uint start_latency = get_latency_for_node(LCA->head());
  uint end_latency   = get_latency_for_node(LCA->get_node(LCA->end_idx()));
  bool in_latency    = (target <= start_latency);
  const Block* root_block = get_block_for_node(_root);

  // Turn off latency scheduling if scheduling is just plain off
  if (!C->do_scheduling())
    in_latency = true;

  // Do not hoist (to cover latency) instructions which target a
  // single register. Hoisting stretches the live range of the
  // single register and may force spilling.
  MachNode* mach = self->is_Mach() ?
    self->as_Mach() : NULL;
  if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty())
    in_latency = true;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# Find cheaper block for latency %d: ", get_latency_for_node(self));
    self->dump();
    tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
      LCA->_pre_order,
      LCA->head()->_idx,
      start_latency,
      LCA->get_node(LCA->end_idx())->_idx,
      end_latency,
      least_freq);
  }
#endif

  int cand_cnt = 0;  // number of candidates tried

  // Walk up the dominator tree from LCA (Lowest common ancestor) to
  // the earliest legal location. Capture the least execution frequency.
  while (LCA != early) {
    LCA = LCA->_idom;         // Follow up the dominator tree

    if (LCA == NULL) {
      // Bailout without retry
      C->record_method_not_compilable("late schedule failed: LCA == NULL");
      return least;
    }

    // Don't hoist machine instructions to the root basic block
    if (mach && LCA == root_block)
      break;

    uint start_lat = get_latency_for_node(LCA->head());
    uint end_idx   = LCA->end_idx();
    uint end_lat   = get_latency_for_node(LCA->get_node(end_idx));
    double LCA_freq = LCA->_freq;
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
        LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq);
    }
#endif
    cand_cnt++;
    if (LCA_freq < least_freq                               || // Better Frequency
        (StressGCM && Compile::randomized_select(cand_cnt)) || // Should be randomly accepted in stress mode
        (!StressGCM                       &&  // Otherwise, choose with latency
         !in_latency                      &&  // No block containing latency
         LCA_freq < least_freq * delta    &&  // No worse frequency
         target >= end_lat                &&  // within latency range
         !self->is_iteratively_computed() )   // But don't hoist IV increments
             // because they may end up above other uses of their phi forcing
             // their result register to be different from their input.
       ) {
      least = LCA;            // Found cheaper block
      least_freq = LCA_freq;
      start_latency = start_lat;
      end_latency = end_lat;
      if (target <= start_lat)
        in_latency = true;
    }
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print_cr("# Choose block B%d with start latency=%d and freq=%g",
      least->_pre_order, start_latency, least_freq);
  }
#endif

  // See if the latency needs to be updated
  if (target < end_latency) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
    }
#endif
    set_latency_for_node(self, end_latency);
    partial_latency_of_defs(self);
  }

  return least;
}


//------------------------------schedule_late-----------------------------------
// Now schedule all codes as LATE as possible. This is the LCA in the
// dominator tree of all USES of a value. Pick the block with the least
// loop nesting depth that is lowest in the dominator tree.
extern const char must_clone[];
void PhaseCFG::schedule_late(VectorSet &visited, Node_Stack &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- schedule_late ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
  Node *self;

  // Walk over all the nodes from last to first
  while (self = iter.next()) {
    Block* early = get_block_for_node(self); // Earliest legal placement

    if (self->is_top()) {
      // Top node goes in bb #2 with other constants.
      // It must be special-cased, because it has no out edges.
      early->add_inst(self);
      continue;
    }

    // No uses, just terminate
    if (self->outcnt() == 0) {
      assert(self->is_MachProj(), "sanity");
      continue;                   // Must be a dead machine projection
    }

    // If node is pinned in the block, then no scheduling can be done.
    if( self->pinned() )          // Pinned in block?
      continue;

    MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
    if (mach) {
      switch (mach->ideal_Opcode()) {
      case Op_CreateEx:
        // Don't move exception creation
        early->add_inst(self);
        continue;
        break;
      case Op_CheckCastPP:
        // Don't move CheckCastPP nodes away from their input, if the input
        // is a rawptr (5071820).
        Node *def = self->in(1);
        if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
          early->add_inst(self);
#ifdef ASSERT
          _raw_oops.push(def);
#endif
          continue;
        }
        break;
      }
    }

    // Gather LCA of all uses
    Block *LCA = NULL;
    {
      for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
        // For all uses, find LCA
        Node* use = self->fast_out(i);
        LCA = raise_LCA_above_use(LCA, use, self, this);
      }
    }  // (Hide defs of imax, i from rest of block.)

    // Place temps in the block of their use. This isn't a
    // requirement for correctness but it reduces useless
    // interference between temps and other nodes.
    if (mach != NULL && mach->is_MachTemp()) {
      map_node_to_block(self, LCA);
      LCA->add_inst(self);
      continue;
    }

    // Check if 'self' could be anti-dependent on memory
    if (self->needs_anti_dependence_check()) {
      // Hoist LCA above possible-defs and insert anti-dependences to
      // defs in new LCA block.
      LCA = insert_anti_dependences(LCA, self);
    }

    if (early->_dom_depth > LCA->_dom_depth) {
      // Somehow the LCA has moved above the earliest legal point.
      // (One way this can happen is via memory_early_block.)
      if (C->subsume_loads() == true && !C->failing()) {
        // Retry with subsume_loads == false
        // If this is the first failure, the sentinel string will "stick"
        // to the Compile object, and the C2Compiler will see it and retry.
        C->record_failure(C2Compiler::retry_no_subsuming_loads());
      } else {
        // Bailout without retry when (early->_dom_depth > LCA->_dom_depth)
        C->record_method_not_compilable("late schedule failed: incorrect graph");
      }
      return;
    }

    // If there is no opportunity to hoist, then we're done.
    // In stress mode, try to hoist even the single operations.
    bool try_to_hoist = StressGCM || (LCA != early);

    // Must clone guys stay next to use; no hoisting allowed.
    // Also cannot hoist guys that alter memory or are otherwise not
    // allocatable (hoisting can make a value live longer, leading to
    // anti and output dependency problems which are normally resolved
    // by the register allocator giving everyone a different register).
    if (mach != NULL && must_clone[mach->ideal_Opcode()])
      try_to_hoist = false;

    Block* late = NULL;
    if (try_to_hoist) {
      // Now find the block with the least execution frequency.
      // Start at the latest schedule and work up to the earliest schedule
      // in the dominator tree. Thus the Node will dominate all its uses.
      late = hoist_to_cheaper_block(LCA, early, self);
    } else {
      // Just use the LCA of the uses.
      late = LCA;
    }

    // Put the node into target block
    schedule_node_into_block(self, late);

#ifdef ASSERT
    if (self->needs_anti_dependence_check()) {
      // since precedence edges are only inserted when we're sure they
      // are needed make sure that after placement in a block we don't
      // need any new precedence edges.
      verify_anti_dependences(late, self);
    }
#endif
  } // Loop until all nodes have been visited

} // end ScheduleLate

//------------------------------GlobalCodeMotion-------------------------------
void PhaseCFG::global_code_motion() {
  ResourceMark rm;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Start GlobalCodeMotion ----\n");
  }
#endif

  // Initialize the node to block mapping for things on the proj_list
  for (uint i = 0; i < _matcher.number_of_projections(); i++) {
    unmap_node_from_block(_matcher.get_projection(i));
  }

  // Set the basic block for Nodes pinned into blocks
  Arena* arena = Thread::current()->resource_area();
  VectorSet visited(arena);
  schedule_pinned_nodes(visited);

  // Find the earliest Block any instruction can be placed in. Some
  // instructions are pinned into Blocks. Unpinned instructions can
  // appear in last block in which all their inputs occur.
  visited.Clear();
  Node_Stack stack(arena, (C->live_nodes() >> 2) + 16); // pre-grow
  if (!schedule_early(visited, stack)) {
    // Bailout without retry
    C->record_method_not_compilable("early schedule failed");
    return;
  }

  // Build Def-Use edges.
  // Compute the latency information (via backwards walk) for all the
  // instructions in the graph
  _node_latency = new GrowableArray<uint>(); // resource_area allocation

  if (C->do_scheduling()) {
    compute_latencies_backwards(visited, stack);
  }

  // Now schedule all codes as LATE as possible. This is the LCA in the
  // dominator tree of all USES of a value. Pick the block with the least
  // loop nesting depth that is lowest in the dominator tree.
  // ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() )
  schedule_late(visited, stack);
  if (C->failing()) {
    // schedule_late fails only when graph is incorrect.
    assert(!VerifyGraphEdges, "verification should have failed");
    return;
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Detect implicit null checks ----\n");
  }
#endif

  // Detect implicit-null-check opportunities. Basically, find NULL checks
  // with suitable memory ops nearby. Use the memory op to do the NULL check.
  // I can generate a memory op if there is not one nearby.
  if (C->is_method_compilation()) {
    // By reversing the loop direction we get a very minor gain on mpegaudio.
    // Feel free to revert to a forward loop for clarity.
    // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
    for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) {
      Node* proj = _matcher._null_check_tests[i];
      Node* val  = _matcher._null_check_tests[i + 1];
      Block* block = get_block_for_node(proj);
      implicit_null_check(block, proj, val, C->allowed_deopt_reasons());
      // The implicit_null_check will only perform the transformation
      // if the null branch is truly uncommon, *and* it leads to an
      // uncommon trap. Combined with the too_many_traps guards
      // above, this prevents SEGV storms reported in 6366351,
      // by recompiling offending methods without this optimization.
    }
  }

  bool block_size_threshold_ok = false;
  intptr_t *recalc_pressure_nodes = NULL;
  if (OptoRegScheduling) {
    for (uint i = 0; i < number_of_blocks(); i++) {
      Block* block = get_block(i);
      if (block->number_of_nodes() > 10) {
        block_size_threshold_ok = true;
        break;
      }
    }
  }

  // Enabling the scheduler for register pressure plus finding blocks of size to schedule for it
  // is key to enabling this feature.
  PhaseChaitin regalloc(C->unique(), *this, _matcher, true);
  ResourceArea live_arena;      // Arena for liveness
  ResourceMark rm_live(&live_arena);
  PhaseLive live(*this, regalloc._lrg_map.names(), &live_arena, true);
  PhaseIFG ifg(&live_arena);
  if (OptoRegScheduling && block_size_threshold_ok) {
    regalloc.mark_ssa();
    Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
    rm_live.reset_to_mark();           // Reclaim working storage
    IndexSet::reset_memory(C, &live_arena);
    uint node_size = regalloc._lrg_map.max_lrg_id();
    ifg.init(node_size);  // Empty IFG
    regalloc.set_ifg(ifg);
    regalloc.set_live(live);
    regalloc.gather_lrg_masks(false);    // Collect LRG masks
    live.compute(node_size); // Compute liveness

    recalc_pressure_nodes = NEW_RESOURCE_ARRAY(intptr_t, node_size);
    for (uint i = 0; i < node_size; i++) {
      recalc_pressure_nodes[i] = 0;
    }
  }
  _regalloc = &regalloc;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Start Local Scheduling ----\n");
  }
#endif

  // Schedule locally. Right now a simple topological sort.
  // Later, do a real latency aware scheduler.
  GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1);
  visited.Clear();
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    if (!schedule_local(block, ready_cnt, visited, recalc_pressure_nodes)) {
      if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
        C->record_method_not_compilable("local schedule failed");
      }
      _regalloc = NULL;
      return;
    }
  }
  _regalloc = NULL;

  // If we inserted any instructions between a Call and his CatchNode,
  // clone the instructions on all paths below the Catch.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    call_catch_cleanup(block);
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- After GlobalCodeMotion ----\n");
    for (uint i = 0; i < number_of_blocks(); i++) {
      Block* block = get_block(i);
      block->dump();
    }
  }
#endif
  // Dead.
  _node_latency = (GrowableArray<uint> *)0xdeadbeef;
}

bool PhaseCFG::do_global_code_motion() {

  build_dominator_tree();
  if (C->failing()) {
    return false;
  }

  NOT_PRODUCT( C->verify_graph_edges(); )

  estimate_block_frequency();

  global_code_motion();

  if (C->failing()) {
    return false;
  }

  return true;
}

//------------------------------Estimate_Block_Frequency-----------------------
// Estimate block frequencies based on IfNode probabilities.
void PhaseCFG::estimate_block_frequency() {

  // Force conditional branches leading to uncommon traps to be unlikely,
  // not because we get to the uncommon_trap with less relative frequency,
  // but because an uncommon_trap typically causes a deopt, so we only get
  // there once.
  if (C->do_freq_based_layout()) {
    Block_List worklist;
    Block* root_blk = get_block(0);
    for (uint i = 1; i < root_blk->num_preds(); i++) {
      Block *pb = get_block_for_node(root_blk->pred(i));
      if (pb->has_uncommon_code()) {
        worklist.push(pb);
      }
    }
    while (worklist.size() > 0) {
      Block* uct = worklist.pop();
      if (uct == get_root_block()) {
        continue;
      }
      for (uint i = 1; i < uct->num_preds(); i++) {
        Block *pb = get_block_for_node(uct->pred(i));
        if (pb->_num_succs == 1) {
          worklist.push(pb);
        } else if (pb->num_fall_throughs() == 2) {
          pb->update_uncommon_branch(uct);
        }
      }
    }
  }

  // Create the loop tree and calculate loop depth.
  _root_loop = create_loop_tree();
  _root_loop->compute_loop_depth(0);

  // Compute block frequency of each block, relative to a single loop entry.
  _root_loop->compute_freq();

  // Adjust all frequencies to be relative to a single method entry
  _root_loop->_freq = 1.0;
  _root_loop->scale_freq();

  // Save outermost loop frequency for LRG frequency threshold
  _outer_loop_frequency = _root_loop->outer_loop_freq();

  // Force paths ending at uncommon traps to be infrequent
  if (!C->do_freq_based_layout()) {
    Block_List worklist;
    Block* root_blk = get_block(0);
    for (uint i = 1; i < root_blk->num_preds(); i++) {
      Block *pb = get_block_for_node(root_blk->pred(i));
      if (pb->has_uncommon_code()) {
        worklist.push(pb);
      }
    }
    while (worklist.size() > 0) {
      Block* uct = worklist.pop();
      uct->_freq = PROB_MIN;
      for (uint i = 1; i < uct->num_preds(); i++) {
        Block *pb = get_block_for_node(uct->pred(i));
        if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
          worklist.push(pb);
        }
      }
    }
  }

#ifdef ASSERT
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* b = get_block(i);
    assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency");
  }
#endif

#ifndef PRODUCT
  if (PrintCFGBlockFreq) {
    tty->print_cr("CFG Block Frequencies");
    _root_loop->dump_tree();
    if (Verbose) {
      tty->print_cr("PhaseCFG dump");
      dump();
      tty->print_cr("Node dump");
      _root->dump(99999);
    }
  }
#endif
}

//----------------------------create_loop_tree--------------------------------
// Create a loop tree from the CFG
CFGLoop* PhaseCFG::create_loop_tree() {

#ifdef ASSERT
  assert(get_block(0) == get_root_block(), "first block should be root block");
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    // Check that the _loop fields are clear...we could clear them if not.
    assert(block->_loop == NULL, "clear _loop expected");
    // Sanity check that the RPO numbering is reflected in the _blocks array.
    // It doesn't have to be for the loop tree to be built, but if it is not,
    // then the blocks have been reordered since dominator-graph building,
    // which calls the RPO numbering into question.
    assert(block->_rpo == i, "unexpected reverse post order number");
  }
#endif

  int idct = 0;
  CFGLoop* root_loop = new CFGLoop(idct++);

  Block_List worklist;

  // Assign blocks to loops
  for (uint i = number_of_blocks() - 1; i > 0; i--) { // skip Root block
    Block* block = get_block(i);

    if (block->head()->is_Loop()) {
      Block* loop_head = block;
      assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
      Block* tail = get_block_for_node(tail_n);

      // Defensively filter out Loop nodes for non-single-entry loops.
      // For all reasonable loops, the head occurs before the tail in RPO.
      if (i <= tail->_rpo) {

        // The tail and (recursive) predecessors of the tail
        // are made members of a new loop.
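        // (Sketch of the walk that follows: push_pred() starts at the
        //  back-edge predecessor and keeps pushing predecessors onto the
        //  worklist until the loop head is reached again; a block already
        //  claimed by an inner loop is entered through that inner loop's
        //  EntryControl predecessor instead, so inner-loop bodies are
        //  skipped.)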

        assert(worklist.size() == 0, "nonempty worklist");
        CFGLoop* nloop = new CFGLoop(idct++);
        assert(loop_head->_loop == NULL, "just checking");
        loop_head->_loop = nloop;
        // Add to nloop so push_pred() will skip over inner loops
        nloop->add_member(loop_head);
        nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this);

        while (worklist.size() > 0) {
          Block* member = worklist.pop();
          if (member != loop_head) {
            for (uint j = 1; j < member->num_preds(); j++) {
              nloop->push_pred(member, j, worklist, this);
            }
          }
        }
      }
    }
  }

  // Create a member list for each loop consisting
  // of both blocks and (immediate child) loops.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    CFGLoop* lp = block->_loop;
    if (lp == NULL) {
      // Not assigned to a loop. Add it to the method's pseudo loop.
      block->_loop = root_loop;
      lp = root_loop;
    }
    if (lp == root_loop || block != lp->head()) { // loop heads are already members
      lp->add_member(block);
    }
    if (lp != root_loop) {
      if (lp->parent() == NULL) {
        // Not a nested loop. Make it a child of the method's pseudo loop.
        root_loop->add_nested_loop(lp);
      }
      if (block == lp->head()) {
        // Add nested loop to member list of parent loop.
        lp->parent()->add_member(lp);
      }
    }
  }

  return root_loop;
}

//------------------------------push_pred--------------------------------------
void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) {
  Node* pred_n = blk->pred(i);
  Block* pred = cfg->get_block_for_node(pred_n);
  CFGLoop *pred_loop = pred->_loop;
  if (pred_loop == NULL) {
    // Filter out blocks for non-single-entry loops.
    // For all reasonable loops, the head occurs before the tail in RPO.
    if (pred->_rpo > head()->_rpo) {
      pred->_loop = this;
      worklist.push(pred);
    }
  } else if (pred_loop != this) {
    // Nested loop.
    while (pred_loop->_parent != NULL && pred_loop->_parent != this) {
      pred_loop = pred_loop->_parent;
    }
    // Make pred's loop be a child
    if (pred_loop->_parent == NULL) {
      add_nested_loop(pred_loop);
      // Continue with loop entry predecessor.
      Block* pred_head = pred_loop->head();
      assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      assert(pred_head != head(), "loop head in only one loop");
      push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
    } else {
      assert(pred_loop->_parent == this && _parent == NULL, "just checking");
    }
  }
}

//------------------------------add_nested_loop--------------------------------
// Make cl a child of the current loop in the loop tree.
void CFGLoop::add_nested_loop(CFGLoop* cl) {
  assert(_parent == NULL, "no parent yet");
  assert(cl != this, "not my own parent");
  cl->_parent = this;
  CFGLoop* ch = _child;
  if (ch == NULL) {
    _child = cl;
  } else {
    while (ch->_sibling != NULL) { ch = ch->_sibling; }
    ch->_sibling = cl;
  }
}

//------------------------------compute_loop_depth-----------------------------
// Store the loop depth in each CFGLoop object.
// Recursively walk the children to do the same for them.
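// For example (illustrative only): for a method whose pseudo loop is at
// depth 0 and contains loop L1, which in turn contains loop L2, this
// assigns depth 1 to L1 and depth 2 to L2.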
void CFGLoop::compute_loop_depth(int depth) {
  _depth = depth;
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->compute_loop_depth(depth + 1);
    ch = ch->_sibling;
  }
}

//------------------------------compute_freq-----------------------------------
// Compute the frequency of each block and loop, relative to a single entry
// into the dominating loop head.
void CFGLoop::compute_freq() {
  // Bottom up traversal of loop tree (visit inner loops first.)
  // Set loop head frequency to 1.0, then transitively
  // compute frequency for all successors in the loop,
  // as well as for each exit edge.  Inner loops are
  // treated as single blocks with loop exit targets
  // as the successor blocks.

  // Nested loops first
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->compute_freq();
    ch = ch->_sibling;
  }
  assert (_members.length() > 0, "no empty loops");
  Block* hd = head();
  hd->_freq = 1.0;
  for (int i = 0; i < _members.length(); i++) {
    CFGElement* s = _members.at(i);
    double freq = s->_freq;
    if (s->is_block()) {
      Block* b = s->as_Block();
      for (uint j = 0; j < b->_num_succs; j++) {
        Block* sb = b->_succs[j];
        update_succ_freq(sb, freq * b->succ_prob(j));
      }
    } else {
      CFGLoop* lp = s->as_CFGLoop();
      assert(lp->_parent == this, "immediate child");
      for (int k = 0; k < lp->_exits.length(); k++) {
        Block* eb = lp->_exits.at(k).get_target();
        double prob = lp->_exits.at(k).get_prob();
        update_succ_freq(eb, freq * prob);
      }
    }
  }

  // For all loops other than the outer, "method" loop,
  // sum and normalize the exit probability. The "method" loop
  // should keep the initial exit probability of 1, so that
  // inner blocks do not get erroneously scaled.
  if (_depth != 0) {
    // Total the exit probabilities for this loop.
    double exits_sum = 0.0f;
    for (int i = 0; i < _exits.length(); i++) {
      exits_sum += _exits.at(i).get_prob();
    }

    // Normalize the exit probabilities. Until now, each probability
    // estimates the chance of exiting on a single loop iteration;
    // afterward, it estimates the chance of exiting per loop entry.
    for (int i = 0; i < _exits.length(); i++) {
      Block* et = _exits.at(i).get_target();
      float new_prob = 0.0f;
      if (_exits.at(i).get_prob() > 0.0f) {
        new_prob = _exits.at(i).get_prob() / exits_sum;
      }
      BlockProbPair bpp(et, new_prob);
      _exits.at_put(i, bpp);
    }

    // Save the total, but guard against unreasonable probability,
    // as the value is used to estimate the loop trip count.
    // An infinite trip count would blur relative block
    // frequencies.
    if (exits_sum > 1.0f) exits_sum = 1.0;
    if (exits_sum < PROB_MIN) exits_sum = PROB_MIN;
    _exit_prob = exits_sum;
  }
}

//------------------------------succ_prob-------------------------------------
// Determine the probability of reaching successor 'i' from the receiver block.
float Block::succ_prob(uint i) {
  int eidx = end_idx();
  Node *n = get_node(eidx); // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // Can only reach here if called after lcm. The original Op_If is gone,
      // so we attempt to infer the probability from one or both of the
      // successor blocks.
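      // For instance (hypothetical numbers): if this block's _freq is 1.0 and
      // the not-null successor has this block as its only predecessor with
      // _freq 0.998, the inferred probability for that successor is 0.998,
      // and the null path correspondingly gets about 0.002.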
      assert(_num_succs == 2, "expecting 2 successors of a null check");
      // If either successor has only one predecessor, then the
      // probability estimate can be derived using the
      // relative frequency of the successor and this block.
      if (_succs[i]->num_preds() == 2) {
        return _succs[i]->_freq / _freq;
      } else if (_succs[1-i]->num_preds() == 2) {
        return 1 - (_succs[1-i]->_freq / _freq);
      } else {
        // Estimate using both successor frequencies
        float freq = _succs[i]->_freq;
        return freq / (freq + _succs[1-i]->_freq);
      }
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If: {
    assert (i < 2, "just checking");
    // Conditionals pass on only part of their frequency
    float prob = n->as_MachIf()->_prob;
    assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
    // If succ[i] is the FALSE branch, invert path info
    if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) {
      return 1.0f - prob; // not taken
    } else {
      return prob; // taken
    }
  }

  case Op_Jump:
    // Divide the frequency between all successors evenly
    return 1.0f/_num_succs;

  case Op_Catch: {
    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
    if (ci->_con == CatchProjNode::fall_through_index) {
      // Fall-thru path gets the lion's share.
      return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
    } else {
      // Presume exceptional paths are equally unlikely
      return PROB_UNLIKELY_MAG(5);
    }
  }

  case Op_Root:
  case Op_Goto:
    // Pass frequency straight thru to target
    return 1.0f;

  case Op_NeverBranch:
    return 0.0f;

  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    // Do not push out freq to root block
    return 0.0f;

  default:
    ShouldNotReachHere();
  }

  return 0.0f;
}

//------------------------------num_fall_throughs-----------------------------
// Return the number of fall-through candidates for a block
int Block::num_fall_throughs() {
  int eidx = end_idx();
  Node *n = get_node(eidx); // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // In theory either side can fall through; for simplicity's sake,
      // assume only the false branch can for now.
      return 1;
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If:
    return 2;

  case Op_Root:
  case Op_Goto:
    return 1;

  case Op_Catch: {
    for (uint i = 0; i < _num_succs; i++) {
      const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
      if (ci->_con == CatchProjNode::fall_through_index) {
        return 1;
      }
    }
    return 0;
  }

  case Op_Jump:
  case Op_NeverBranch:
  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    return 0;

  default:
    ShouldNotReachHere();
  }

  return 0;
}

//------------------------------succ_fall_through-----------------------------
// Return true if a specific successor could be a fall-through target.
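// For example, either projection of an If may fall through, while for a Catch
// only the fall_through_index projection may; see the switch below.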
bool Block::succ_fall_through(uint i) {
  int eidx = end_idx();
  Node *n = get_node(eidx); // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // In theory either side can fall through; for simplicity's sake,
      // assume only the false branch can for now.
      return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch( op ) {
  case Op_CountedLoopEnd:
  case Op_If:
  case Op_Root:
  case Op_Goto:
    return true;

  case Op_Catch: {
    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
    return ci->_con == CatchProjNode::fall_through_index;
  }

  case Op_Jump:
  case Op_NeverBranch:
  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    return false;

  default:
    ShouldNotReachHere();
  }

  return false;
}

//------------------------------update_uncommon_branch------------------------
// Update the probability of a two-way branch so that the path to block ub
// becomes uncommon.
void Block::update_uncommon_branch(Block* ub) {
  int eidx = end_idx();
  Node *n = get_node(eidx); // Get ending Node

  int op = n->as_Mach()->ideal_Opcode();

  assert(op == Op_CountedLoopEnd || op == Op_If, "must be an If");
  assert(num_fall_throughs() == 2, "must be a two way branch block");

  // Which successor is ub?
  uint s;
  for (s = 0; s < _num_succs; s++) {
    if (_succs[s] == ub) break;
  }
  assert(s < 2, "uncommon successor must be found");

  // If ub is on the true path, make the probability small; if ub is on the
  // false path, make the probability large.
  bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);

  // Get existing probability
  float p = n->as_MachIf()->_prob;

  if (invert) p = 1.0 - p;
  if (p > PROB_MIN) {
    p = PROB_MIN;
  }
  if (invert) p = 1.0 - p;

  n->as_MachIf()->_prob = p;
}

//------------------------------update_succ_freq-------------------------------
// Update the appropriate frequency associated with block 'b', a successor of
// a block in this loop.
void CFGLoop::update_succ_freq(Block* b, double freq) {
  if (b->_loop == this) {
    if (b == head()) {
      // Back branch within the loop.
      // Do nothing now; the loop-carried frequency will be
      // adjusted later in scale_freq().
    } else {
      // Simple branch within the loop.
      b->_freq += freq;
    }
  } else if (!in_loop_nest(b)) {
    // The branch is an exit from this loop.
    BlockProbPair bpp(b, freq);
    _exits.append(bpp);
  } else {
    // Branch into a nested loop.
    CFGLoop* ch = b->_loop;
    ch->_freq += freq;
  }
}

//------------------------------in_loop_nest-----------------------------------
// Determine if block b is in the receiver's loop nest.
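// For example (illustrative only): if b belongs to a loop at depth 3 and the
// receiver is at depth 1, we climb b's _parent chain until the depths match
// and then check whether we arrived at the receiver.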
bool CFGLoop::in_loop_nest(Block* b) {
  int depth = _depth;
  CFGLoop* b_loop = b->_loop;
  int b_depth = b_loop->_depth;
  if (depth == b_depth) {
    return true;
  }
  while (b_depth > depth) {
    b_loop = b_loop->_parent;
    b_depth = b_loop->_depth;
  }
  return b_loop == this;
}

//------------------------------scale_freq-------------------------------------
// Scale frequency of loops and blocks by trip counts from outer loops
// Do a top down traversal of loop tree (visit outer loops first.)
void CFGLoop::scale_freq() {
  double loop_freq = _freq * trip_count();
  _freq = loop_freq;
  for (int i = 0; i < _members.length(); i++) {
    CFGElement* s = _members.at(i);
    double block_freq = s->_freq * loop_freq;
    if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
      block_freq = MIN_BLOCK_FREQUENCY;
    s->_freq = block_freq;
  }
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->scale_freq();
    ch = ch->_sibling;
  }
}

// Frequency of outer loop
double CFGLoop::outer_loop_freq() const {
  if (_child != NULL) {
    return _child->_freq;
  }
  return _freq;
}

#ifndef PRODUCT
//------------------------------dump_tree--------------------------------------
void CFGLoop::dump_tree() const {
  dump();
  if (_child != NULL) _child->dump_tree();
  if (_sibling != NULL) _sibling->dump_tree();
}

//------------------------------dump-------------------------------------------
void CFGLoop::dump() const {
  for (int i = 0; i < _depth; i++) tty->print(" ");
  tty->print("%s: %d trip_count: %6.0f freq: %6.0f\n",
             _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq);
  for (int i = 0; i < _depth; i++) tty->print(" ");
  tty->print(" members:");
  int k = 0;
  for (int i = 0; i < _members.length(); i++) {
    if (k++ >= 6) {
      tty->print("\n ");
      for (int j = 0; j < _depth+1; j++) tty->print(" ");
      k = 0;
    }
    CFGElement *s = _members.at(i);
    if (s->is_block()) {
      Block *b = s->as_Block();
      tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq);
    } else {
      CFGLoop* lp = s->as_CFGLoop();
      tty->print(" L%d(%6.3f)", lp->_id, lp->_freq);
    }
  }
  tty->print("\n");
  for (int i = 0; i < _depth; i++) tty->print(" ");
  tty->print(" exits: ");
  k = 0;
  for (int i = 0; i < _exits.length(); i++) {
    if (k++ >= 7) {
      tty->print("\n ");
      for (int j = 0; j < _depth+1; j++) tty->print(" ");
      k = 0;
    }
    Block *blk = _exits.at(i).get_target();
    double prob = _exits.at(i).get_prob();
    tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
  }
  tty->print("\n");
}
#endif