1 /* 2 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "libadt/vectset.hpp" 27 #include "memory/allocation.inline.hpp" 28 #include "opto/block.hpp" 29 #include "opto/c2compiler.hpp" 30 #include "opto/callnode.hpp" 31 #include "opto/cfgnode.hpp" 32 #include "opto/machnode.hpp" 33 #include "opto/opcodes.hpp" 34 #include "opto/phaseX.hpp" 35 #include "opto/rootnode.hpp" 36 #include "opto/runtime.hpp" 37 #include "runtime/deoptimization.hpp" 38 #if defined AD_MD_HPP 39 # include AD_MD_HPP 40 #elif defined TARGET_ARCH_MODEL_x86_32 41 # include "adfiles/ad_x86_32.hpp" 42 #elif defined TARGET_ARCH_MODEL_x86_64 43 # include "adfiles/ad_x86_64.hpp" 44 #elif defined TARGET_ARCH_MODEL_aarch64 45 # include "adfiles/ad_aarch64.hpp" 46 #elif defined TARGET_ARCH_MODEL_sparc 47 # include "adfiles/ad_sparc.hpp" 48 #elif defined TARGET_ARCH_MODEL_zero 49 # include "adfiles/ad_zero.hpp" 50 #elif defined TARGET_ARCH_MODEL_ppc_64 51 # include "adfiles/ad_ppc_64.hpp" 52 #endif 53 54 55 // Portions of code courtesy of Clifford Click 56 57 // Optimization - Graph Style 58 59 // To avoid float value underflow 60 #define MIN_BLOCK_FREQUENCY 1.e-35f 61 62 //----------------------------schedule_node_into_block------------------------- 63 // Insert node n into block b. Look for projections of n and make sure they 64 // are in b also. 65 void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) { 66 // Set basic block of n, Add n to b, 67 map_node_to_block(n, b); 68 b->add_inst(n); 69 70 // After Matching, nearly any old Node may have projections trailing it. 71 // These are usually machine-dependent flags. In any case, they might 72 // float to another block below this one. Move them up. 73 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 74 Node* use = n->fast_out(i); 75 if (use->is_Proj()) { 76 Block* buse = get_block_for_node(use); 77 if (buse != b) { // In wrong block? 78 if (buse != NULL) { 79 buse->find_remove(use); // Remove from wrong block 80 } 81 map_node_to_block(use, b); 82 b->add_inst(use); 83 } 84 } 85 } 86 } 87 88 //----------------------------replace_block_proj_ctrl------------------------- 89 // Nodes that have is_block_proj() nodes as their control need to use 90 // the appropriate Region for their actual block as their control since 91 // the projection will be in a predecessor block. 
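// For illustration (not part of the original comment), consider a control
// input that is the true projection of an If ending block B1:
//
//     If                 (ends block B1)
//    /  \
// IfTrue IfFalse         (block projections, scheduled at the end of B1)
//    |
//    n                   (control-dependent node, belongs in successor B2)
//
// Since IfTrue lives at the end of B1, the code below rewires n's control
// edge to the head (Region) of the successor block selected by that
// projection.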
void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
  const Node *in0 = n->in(0);
  assert(in0 != NULL, "Only control-dependent");
  const Node *p = in0->is_block_proj();
  if (p != NULL && p != n) {    // Control from a block projection?
    assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
    // Find trailing Region
    Block *pb = get_block_for_node(in0); // Block-projection already has basic block
    uint j = 0;
    if (pb->_num_succs != 1) {  // More than 1 successor?
      // Search for successor
      uint max = pb->number_of_nodes();
      assert( max > 1, "" );
      uint start = max - pb->_num_succs;
      // Find which output path belongs to projection
      for (j = start; j < max; j++) {
        if( pb->get_node(j) == in0 )
          break;
      }
      assert( j < max, "must find" );
      // Change control to match head of successor basic block
      j -= start;
    }
    n->set_req(0, pb->_succs[j]->head());
  }
}


static bool is_dominator(Block* d, Block* n) {
  return d->dom_lca(n) == d;
}

//------------------------------schedule_pinned_nodes--------------------------
// Set the basic block for Nodes pinned into blocks
void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
  // Allocate node stack of size C->live_nodes()+8 to avoid frequent realloc
  GrowableArray <Node *> spstack(C->live_nodes() + 8);
  spstack.push(_root);
  while (spstack.is_nonempty()) {
    Node* node = spstack.pop();
    if (!visited.test_set(node->_idx)) { // Test node and flag it as visited
      if (node->pinned() && !has_block(node)) {  // Pinned?  Nail it down!
        assert(node->in(0), "pinned Node must have Control");
        // Before setting block replace block_proj control edge
        replace_block_proj_ctrl(node);
        Node* input = node->in(0);
        while (!input->is_block_start()) {
          input = input->in(0);
        }
        Block* block = get_block_for_node(input); // Basic block of controlling input
        schedule_node_into_block(node, block);
      }

      // If the node has precedence edges (added when CastPP nodes are
      // removed in final_graph_reshaping), fix the control of the
      // node to cover the precedence edges and remove the
      // dependencies.
      Node* n = NULL;
      for (uint i = node->len()-1; i >= node->req(); i--) {
        Node* m = node->in(i);
        if (m == NULL) continue;
        // Skip the precedence edge if the test that guarded a CastPP:
        // - was optimized out during escape analysis
        //   (OptimizePtrCompare): the CastPP's control isn't an end of
        //   block.
        // - is moved in the branch of a dominating If: the control of
        //   the CastPP is then a Region.
        if (m->is_block_proj() || m->is_block_start()) {
          node->rm_prec(i);
          if (n == NULL) {
            n = m;
          } else {
            Block* bn = get_block_for_node(n);
            Block* bm = get_block_for_node(m);
            assert(is_dominator(bn, bm) || is_dominator(bm, bn), "one must dominate the other");
            n = is_dominator(bn, bm) ? m : n;
          }
        }
      }
      if (n != NULL) {
        assert(node->in(0), "control should have been set");
        Block* bn = get_block_for_node(n);
        Block* bnode = get_block_for_node(node->in(0));
        assert(is_dominator(bn, bnode) || is_dominator(bnode, bn), "one must dominate the other");
        if (!is_dominator(bn, bnode)) {
          node->set_req(0, n);
        }
      }

      // process all inputs that are non NULL
      for (int i = node->req() - 1; i >= 0; --i) {
        if (node->in(i) != NULL) {
          spstack.push(node->in(i));
        }
      }
    }
  }
}

#ifdef ASSERT
// Assert that new input b2 is dominated by all previous inputs.
// Check this by seeing that it is dominated by b1, the deepest
// input observed until b2.
static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
  if (b1 == NULL)  return;
  assert(b1->_dom_depth < b2->_dom_depth, "sanity");
  Block* tmp = b2;
  while (tmp != b1 && tmp != NULL) {
    tmp = tmp->_idom;
  }
  if (tmp != b1) {
    // Detected an unschedulable graph.  Print some nice stuff and die.
    tty->print_cr("!!! Unschedulable graph !!!");
    for (uint j=0; j<n->len(); j++) { // For all inputs
      Node* inn = n->in(j);           // Get input
      if (inn == NULL)  continue;     // Ignore NULL, missing inputs
      Block* inb = cfg->get_block_for_node(inn);
      tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
                 inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
      inn->dump();
    }
    tty->print("Failing node: ");
    n->dump();
    assert(false, "unschedulable graph");
  }
}
#endif

static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
  // Find the last input dominated by all other inputs.
  Block* deepb           = NULL;        // Deepest block so far
  int    deepb_dom_depth = 0;
  for (uint k = 0; k < n->len(); k++) { // For all inputs
    Node* inn = n->in(k);               // Get input
    if (inn == NULL)  continue;         // Ignore NULL, missing inputs
    Block* inb = cfg->get_block_for_node(inn);
    assert(inb != NULL, "must already have scheduled this input");
    if (deepb_dom_depth < (int) inb->_dom_depth) {
      // The new inb must be dominated by the previous deepb.
      // The various inputs must be linearly ordered in the dom
      // tree, or else there will not be a unique deepest block.
      DEBUG_ONLY(assert_dom(deepb, inb, n, cfg));
      deepb = inb;                      // Save deepest block
      deepb_dom_depth = deepb->_dom_depth;
    }
  }
  assert(deepb != NULL, "must be at least one input to n");
  return deepb;
}


//------------------------------schedule_early---------------------------------
// Find the earliest Block any instruction can be placed in.  Some instructions
// are pinned into Blocks.  Unpinned instructions can appear in the last block
// in which all their inputs occur.
bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) {
  // Allocate stack with enough space to avoid frequent realloc
  Node_Stack nstack(roots.Size() + 8);
  // _root will be processed among C->top() inputs
  roots.push(C->top());
  visited.set(C->top()->_idx);

  while (roots.size() != 0) {
    // Use local variables nstack_top_n & nstack_top_i to cache values
    // on stack's top.
    Node* parent_node = roots.pop();
    uint  input_index = 0;

    while (true) {
      if (input_index == 0) {
        // Fixup some control.
Constants without control get attached 261 // to root and nodes that use is_block_proj() nodes should be attached 262 // to the region that starts their block. 263 const Node* control_input = parent_node->in(0); 264 if (control_input != NULL) { 265 replace_block_proj_ctrl(parent_node); 266 } else { 267 // Is a constant with NO inputs? 268 if (parent_node->req() == 1) { 269 parent_node->set_req(0, _root); 270 } 271 } 272 } 273 274 // First, visit all inputs and force them to get a block. If an 275 // input is already in a block we quit following inputs (to avoid 276 // cycles). Instead we put that Node on a worklist to be handled 277 // later (since IT'S inputs may not have a block yet). 278 279 // Assume all n's inputs will be processed 280 bool done = true; 281 282 while (input_index < parent_node->len()) { 283 Node* in = parent_node->in(input_index++); 284 if (in == NULL) { 285 continue; 286 } 287 288 int is_visited = visited.test_set(in->_idx); 289 if (!has_block(in)) { 290 if (is_visited) { 291 assert(false, "graph should be schedulable"); 292 return false; 293 } 294 // Save parent node and next input's index. 295 nstack.push(parent_node, input_index); 296 // Process current input now. 297 parent_node = in; 298 input_index = 0; 299 // Not all n's inputs processed. 300 done = false; 301 break; 302 } else if (!is_visited) { 303 // Visit this guy later, using worklist 304 roots.push(in); 305 } 306 } 307 308 if (done) { 309 // All of n's inputs have been processed, complete post-processing. 310 311 // Some instructions are pinned into a block. These include Region, 312 // Phi, Start, Return, and other control-dependent instructions and 313 // any projections which depend on them. 314 if (!parent_node->pinned()) { 315 // Set earliest legal block. 316 Block* earliest_block = find_deepest_input(parent_node, this); 317 map_node_to_block(parent_node, earliest_block); 318 } else { 319 assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge"); 320 } 321 322 if (nstack.is_empty()) { 323 // Finished all nodes on stack. 324 // Process next node on the worklist 'roots'. 325 break; 326 } 327 // Get saved parent node and next input's index. 328 parent_node = nstack.node(); 329 input_index = nstack.index(); 330 nstack.pop(); 331 } 332 } 333 } 334 return true; 335 } 336 337 //------------------------------dom_lca---------------------------------------- 338 // Find least common ancestor in dominator tree 339 // LCA is a current notion of LCA, to be raised above 'this'. 340 // As a convenient boundary condition, return 'this' if LCA is NULL. 341 // Find the LCA of those two nodes. 342 Block* Block::dom_lca(Block* LCA) { 343 if (LCA == NULL || LCA == this) return this; 344 345 Block* anc = this; 346 while (anc->_dom_depth > LCA->_dom_depth) 347 anc = anc->_idom; // Walk up till anc is as high as LCA 348 349 while (LCA->_dom_depth > anc->_dom_depth) 350 LCA = LCA->_idom; // Walk up till LCA is as high as anc 351 352 while (LCA != anc) { // Walk both up till they are the same 353 LCA = LCA->_idom; 354 anc = anc->_idom; 355 } 356 357 return LCA; 358 } 359 360 //--------------------------raise_LCA_above_use-------------------------------- 361 // We are placing a definition, and have been given a def->use edge. 362 // The definition must dominate the use, so move the LCA upward in the 363 // dominator tree to dominate the use. If the use is a phi, adjust 364 // the LCA only with the phi input paths which actually use this def. 
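// For illustration (hypothetical example), suppose the use is a Phi merging
// predecessor blocks B1, B2 and B3, and only its second data input is this def:
//
//     use = Phi(region, v1, def, v3)
//
// The def does not need to dominate the Phi's own block, only the
// predecessor block B2 that supplies in(2), so the LCA below is raised with
// buse->pred(2) rather than with the Phi's block.  If the same def fed
// several Phi inputs, each matching predecessor block would be folded into
// the LCA in turn.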
365 static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) { 366 Block* buse = cfg->get_block_for_node(use); 367 if (buse == NULL) return LCA; // Unused killing Projs have no use block 368 if (!use->is_Phi()) return buse->dom_lca(LCA); 369 uint pmax = use->req(); // Number of Phi inputs 370 // Why does not this loop just break after finding the matching input to 371 // the Phi? Well...it's like this. I do not have true def-use/use-def 372 // chains. Means I cannot distinguish, from the def-use direction, which 373 // of many use-defs lead from the same use to the same def. That is, this 374 // Phi might have several uses of the same def. Each use appears in a 375 // different predecessor block. But when I enter here, I cannot distinguish 376 // which use-def edge I should find the predecessor block for. So I find 377 // them all. Means I do a little extra work if a Phi uses the same value 378 // more than once. 379 for (uint j=1; j<pmax; j++) { // For all inputs 380 if (use->in(j) == def) { // Found matching input? 381 Block* pred = cfg->get_block_for_node(buse->pred(j)); 382 LCA = pred->dom_lca(LCA); 383 } 384 } 385 return LCA; 386 } 387 388 //----------------------------raise_LCA_above_marks---------------------------- 389 // Return a new LCA that dominates LCA and any of its marked predecessors. 390 // Search all my parents up to 'early' (exclusive), looking for predecessors 391 // which are marked with the given index. Return the LCA (in the dom tree) 392 // of all marked blocks. If there are none marked, return the original 393 // LCA. 394 static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) { 395 Block_List worklist; 396 worklist.push(LCA); 397 while (worklist.size() > 0) { 398 Block* mid = worklist.pop(); 399 if (mid == early) continue; // stop searching here 400 401 // Test and set the visited bit. 402 if (mid->raise_LCA_visited() == mark) continue; // already visited 403 404 // Don't process the current LCA, otherwise the search may terminate early 405 if (mid != LCA && mid->raise_LCA_mark() == mark) { 406 // Raise the LCA. 407 LCA = mid->dom_lca(LCA); 408 if (LCA == early) break; // stop searching everywhere 409 assert(early->dominates(LCA), "early is high enough"); 410 // Resume searching at that point, skipping intermediate levels. 411 worklist.push(LCA); 412 if (LCA == mid) 413 continue; // Don't mark as visited to avoid early termination. 414 } else { 415 // Keep searching through this block's predecessors. 416 for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) { 417 Block* mid_parent = cfg->get_block_for_node(mid->pred(j)); 418 worklist.push(mid_parent); 419 } 420 } 421 mid->set_raise_LCA_visited(mark); 422 } 423 return LCA; 424 } 425 426 //--------------------------memory_early_block-------------------------------- 427 // This is a variation of find_deepest_input, the heart of schedule_early. 428 // Find the "early" block for a load, if we considered only memory and 429 // address inputs, that is, if other data inputs were ignored. 430 // 431 // Because a subset of edges are considered, the resulting block will 432 // be earlier (at a shallower dom_depth) than the true schedule_early 433 // point of the node. We compute this earlier block as a more permissive 434 // site for anti-dependency insertion, but only if subsume_loads is enabled. 
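// For illustration, suppose matching folded a memory operand into an
// arithmetic instruction, e.g. a hypothetical mach node
//
//     addI_mem(other_value, memory, base, index)
//
// The schedule_early block must lie below the block defining 'other_value',
// but for anti-dependence checking only 'memory', 'base' and 'index' matter,
// so the search below may legitimately start at a shallower block.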
static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
  Node* base;
  Node* index;
  Node* store = load->in(MemNode::Memory);
  load->as_Mach()->memory_inputs(base, index);

  assert(base != NodeSentinel && index != NodeSentinel,
         "unexpected base/index inputs");

  Node* mem_inputs[4];
  int mem_inputs_length = 0;
  if (base != NULL)  mem_inputs[mem_inputs_length++] = base;
  if (index != NULL) mem_inputs[mem_inputs_length++] = index;
  if (store != NULL) mem_inputs[mem_inputs_length++] = store;

  // In the comparison below, add one to account for the control input,
  // which may be null, but always takes up a spot in the in array.
  if (mem_inputs_length + 1 < (int) load->req()) {
    // This "load" has more inputs than just the memory, base and index inputs.
    // For purposes of checking anti-dependences, we need to start
    // from the early block of only the address portion of the instruction,
    // and ignore other blocks that may have factored into the wider
    // schedule_early calculation.
    if (load->in(0) != NULL) mem_inputs[mem_inputs_length++] = load->in(0);

    Block* deepb           = NULL;        // Deepest block so far
    int    deepb_dom_depth = 0;
    for (int i = 0; i < mem_inputs_length; i++) {
      Block* inb = cfg->get_block_for_node(mem_inputs[i]);
      if (deepb_dom_depth < (int) inb->_dom_depth) {
        // The new inb must be dominated by the previous deepb.
        // The various inputs must be linearly ordered in the dom
        // tree, or else there will not be a unique deepest block.
        DEBUG_ONLY(assert_dom(deepb, inb, load, cfg));
        deepb = inb;                      // Save deepest block
        deepb_dom_depth = deepb->_dom_depth;
      }
    }
    early = deepb;
  }

  return early;
}

//--------------------------insert_anti_dependences---------------------------
// A load may need to witness memory that nearby stores can overwrite.
// For each nearby store, either insert an "anti-dependence" edge
// from the load to the store, or else move LCA upward to force the
// load to (eventually) be scheduled in a block above the store.
//
// Do not add edges to stores on distinct control-flow paths;
// only add edges to stores which might interfere.
//
// Return the (updated) LCA.  There will not be any possibly interfering
// store between the load's "early block" and the updated LCA.
// Any stores in the updated LCA will have new precedence edges
// back to the load.  The caller is expected to schedule the load
// in the LCA, in which case the precedence edges will make LCM
// preserve anti-dependences.  The caller may also hoist the load
// above the LCA, if it is not the early block.
Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
  assert(load->needs_anti_dependence_check(), "must be a load of some sort");
  assert(LCA != NULL, "");
  DEBUG_ONLY(Block* LCA_orig = LCA);

  // Compute the alias index.  Loads and stores with different alias indices
  // do not need anti-dependence edges.
  uint load_alias_idx = C->get_alias_index(load->adr_type());
#ifdef ASSERT
  if (load_alias_idx == Compile::AliasIdxBot && C->AliasLevel() > 0 &&
      (PrintOpto || VerifyAliases ||
       PrintMiscellaneous && (WizardMode || Verbose))) {
    // Load nodes should not consume all of memory.
    // Reporting a bottom type indicates a bug in adlc.
    // If some particular type of node validly consumes all of memory,
    // sharpen the preceding "if" to exclude it, so we can catch bugs here.
    tty->print_cr("*** Possible Anti-Dependence Bug:  Load consumes all of memory.");
    load->dump(2);
    if (VerifyAliases)  assert(load_alias_idx != Compile::AliasIdxBot, "");
  }
#endif
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrComp),
         "String compare is only known 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrEquals),
         "String equals is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrIndexOf),
         "String indexOf is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_AryEq),
         "Arrays equals is a 'load' that does not conflict with any stores");

  if (!C->alias_type(load_alias_idx)->is_rewritable()) {
    // It is impossible to spoil this load by putting stores before it,
    // because we know that the stores will never update the value
    // which 'load' must witness.
    return LCA;
  }

  node_idx_t load_index = load->_idx;

  // Note the earliest legal placement of 'load', as determined
  // by the unique point in the dom tree where all memory effects
  // and other inputs are first available.  (Computed by schedule_early.)
  // For normal loads, 'early' is the shallowest place (dom graph wise)
  // to look for anti-deps between this load and any store.
  Block* early = get_block_for_node(load);

  // If we are subsuming loads, compute an "early" block that only considers
  // memory or address inputs. This block may be different than the
  // schedule_early block in that it could be at an even shallower depth in the
  // dominator tree, and allow for a broader discovery of anti-dependences.
  if (C->subsume_loads()) {
    early = memory_early_block(load, early, this);
  }

  ResourceArea *area = Thread::current()->resource_area();
  Node_List worklist_mem(area);     // prior memory state to store
  Node_List worklist_store(area);   // possible-def to explore
  Node_List worklist_visited(area); // visited mergemem nodes
  Node_List non_early_stores(area); // all relevant stores outside of early
  bool must_raise_LCA = false;

#ifdef TRACK_PHI_INPUTS
  // %%% This extra checking fails because MergeMem nodes are not GVNed.
  // Provide "phi_inputs" to check if every input to a PhiNode is from the
  // original memory state.  This indicates a PhiNode which should not
  // prevent the load from sinking.  For such a block, set_raise_LCA_mark
  // may be overly conservative.
  // Mechanism: count inputs seen for each Phi encountered in worklist_store.
  DEBUG_ONLY(GrowableArray<uint> phi_inputs(area, C->unique(),0,0));
#endif

  // 'load' uses some memory state; look for users of the same state.
  // Recurse through MergeMem nodes to the stores that use them.

  // Each of these stores is a possible definition of memory
  // that 'load' needs to use.  We need to force 'load'
  // to occur before each such store.  When the store is in
  // the same block as 'load', we insert an anti-dependence
  // edge load->store.
574 575 // The relevant stores "nearby" the load consist of a tree rooted 576 // at initial_mem, with internal nodes of type MergeMem. 577 // Therefore, the branches visited by the worklist are of this form: 578 // initial_mem -> (MergeMem ->)* store 579 // The anti-dependence constraints apply only to the fringe of this tree. 580 581 Node* initial_mem = load->in(MemNode::Memory); 582 worklist_store.push(initial_mem); 583 worklist_visited.push(initial_mem); 584 worklist_mem.push(NULL); 585 while (worklist_store.size() > 0) { 586 // Examine a nearby store to see if it might interfere with our load. 587 Node* mem = worklist_mem.pop(); 588 Node* store = worklist_store.pop(); 589 uint op = store->Opcode(); 590 591 // MergeMems do not directly have anti-deps. 592 // Treat them as internal nodes in a forward tree of memory states, 593 // the leaves of which are each a 'possible-def'. 594 if (store == initial_mem // root (exclusive) of tree we are searching 595 || op == Op_MergeMem // internal node of tree we are searching 596 ) { 597 mem = store; // It's not a possibly interfering store. 598 if (store == initial_mem) 599 initial_mem = NULL; // only process initial memory once 600 601 for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) { 602 store = mem->fast_out(i); 603 if (store->is_MergeMem()) { 604 // Be sure we don't get into combinatorial problems. 605 // (Allow phis to be repeated; they can merge two relevant states.) 606 uint j = worklist_visited.size(); 607 for (; j > 0; j--) { 608 if (worklist_visited.at(j-1) == store) break; 609 } 610 if (j > 0) continue; // already on work list; do not repeat 611 worklist_visited.push(store); 612 } 613 worklist_mem.push(mem); 614 worklist_store.push(store); 615 } 616 continue; 617 } 618 619 if (op == Op_MachProj || op == Op_Catch) continue; 620 if (store->needs_anti_dependence_check()) continue; // not really a store 621 622 // Compute the alias index. Loads and stores with different alias 623 // indices do not need anti-dependence edges. Wide MemBar's are 624 // anti-dependent on everything (except immutable memories). 625 const TypePtr* adr_type = store->adr_type(); 626 if (!C->can_alias(adr_type, load_alias_idx)) continue; 627 628 // Most slow-path runtime calls do NOT modify Java memory, but 629 // they can block and so write Raw memory. 630 if (store->is_Mach()) { 631 MachNode* mstore = store->as_Mach(); 632 if (load_alias_idx != Compile::AliasIdxRaw) { 633 // Check for call into the runtime using the Java calling 634 // convention (and from there into a wrapper); it has no 635 // _method. Can't do this optimization for Native calls because 636 // they CAN write to Java memory. 637 if (mstore->ideal_Opcode() == Op_CallStaticJava) { 638 assert(mstore->is_MachSafePoint(), ""); 639 MachSafePointNode* ms = (MachSafePointNode*) mstore; 640 assert(ms->is_MachCallJava(), ""); 641 MachCallJavaNode* mcj = (MachCallJavaNode*) ms; 642 if (mcj->_method == NULL) { 643 // These runtime calls do not write to Java visible memory 644 // (other than Raw) and so do not require anti-dependence edges. 645 continue; 646 } 647 } 648 // Same for SafePoints: they read/write Raw but only read otherwise. 649 // This is basically a workaround for SafePoints only defining control 650 // instead of control + memory. 651 if (mstore->ideal_Opcode() == Op_SafePoint) 652 continue; 653 } else { 654 // Some raw memory, such as the load of "top" at an allocation, 655 // can be control dependent on the previous safepoint. 
See 656 // comments in GraphKit::allocate_heap() about control input. 657 // Inserting an anti-dep between such a safepoint and a use 658 // creates a cycle, and will cause a subsequent failure in 659 // local scheduling. (BugId 4919904) 660 // (%%% How can a control input be a safepoint and not a projection??) 661 if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore) 662 continue; 663 } 664 } 665 666 // Identify a block that the current load must be above, 667 // or else observe that 'store' is all the way up in the 668 // earliest legal block for 'load'. In the latter case, 669 // immediately insert an anti-dependence edge. 670 Block* store_block = get_block_for_node(store); 671 assert(store_block != NULL, "unused killing projections skipped above"); 672 673 if (store->is_Phi()) { 674 // 'load' uses memory which is one (or more) of the Phi's inputs. 675 // It must be scheduled not before the Phi, but rather before 676 // each of the relevant Phi inputs. 677 // 678 // Instead of finding the LCA of all inputs to a Phi that match 'mem', 679 // we mark each corresponding predecessor block and do a combined 680 // hoisting operation later (raise_LCA_above_marks). 681 // 682 // Do not assert(store_block != early, "Phi merging memory after access") 683 // PhiNode may be at start of block 'early' with backedge to 'early' 684 DEBUG_ONLY(bool found_match = false); 685 for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) { 686 if (store->in(j) == mem) { // Found matching input? 687 DEBUG_ONLY(found_match = true); 688 Block* pred_block = get_block_for_node(store_block->pred(j)); 689 if (pred_block != early) { 690 // If any predecessor of the Phi matches the load's "early block", 691 // we do not need a precedence edge between the Phi and 'load' 692 // since the load will be forced into a block preceding the Phi. 693 pred_block->set_raise_LCA_mark(load_index); 694 assert(!LCA_orig->dominates(pred_block) || 695 early->dominates(pred_block), "early is high enough"); 696 must_raise_LCA = true; 697 } else { 698 // anti-dependent upon PHI pinned below 'early', no edge needed 699 LCA = early; // but can not schedule below 'early' 700 } 701 } 702 } 703 assert(found_match, "no worklist bug"); 704 #ifdef TRACK_PHI_INPUTS 705 #ifdef ASSERT 706 // This assert asks about correct handling of PhiNodes, which may not 707 // have all input edges directly from 'mem'. See BugId 4621264 708 int num_mem_inputs = phi_inputs.at_grow(store->_idx,0) + 1; 709 // Increment by exactly one even if there are multiple copies of 'mem' 710 // coming into the phi, because we will run this block several times 711 // if there are several copies of 'mem'. (That's how DU iterators work.) 712 phi_inputs.at_put(store->_idx, num_mem_inputs); 713 assert(PhiNode::Input + num_mem_inputs < store->req(), 714 "Expect at least one phi input will not be from original memory state"); 715 #endif //ASSERT 716 #endif //TRACK_PHI_INPUTS 717 } else if (store_block != early) { 718 // 'store' is between the current LCA and earliest possible block. 719 // Label its block, and decide later on how to raise the LCA 720 // to include the effect on LCA of this store. 721 // If this store's block gets chosen as the raised LCA, we 722 // will find him on the non_early_stores list and stick him 723 // with a precedence edge. 724 // (But, don't bother if LCA is already raised all the way.) 
725 if (LCA != early) { 726 store_block->set_raise_LCA_mark(load_index); 727 must_raise_LCA = true; 728 non_early_stores.push(store); 729 } 730 } else { 731 // Found a possibly-interfering store in the load's 'early' block. 732 // This means 'load' cannot sink at all in the dominator tree. 733 // Add an anti-dep edge, and squeeze 'load' into the highest block. 734 assert(store != load->in(0), "dependence cycle found"); 735 if (verify) { 736 assert(store->find_edge(load) != -1, "missing precedence edge"); 737 } else { 738 store->add_prec(load); 739 } 740 LCA = early; 741 // This turns off the process of gathering non_early_stores. 742 } 743 } 744 // (Worklist is now empty; all nearby stores have been visited.) 745 746 // Finished if 'load' must be scheduled in its 'early' block. 747 // If we found any stores there, they have already been given 748 // precedence edges. 749 if (LCA == early) return LCA; 750 751 // We get here only if there are no possibly-interfering stores 752 // in the load's 'early' block. Move LCA up above all predecessors 753 // which contain stores we have noted. 754 // 755 // The raised LCA block can be a home to such interfering stores, 756 // but its predecessors must not contain any such stores. 757 // 758 // The raised LCA will be a lower bound for placing the load, 759 // preventing the load from sinking past any block containing 760 // a store that may invalidate the memory state required by 'load'. 761 if (must_raise_LCA) 762 LCA = raise_LCA_above_marks(LCA, load->_idx, early, this); 763 if (LCA == early) return LCA; 764 765 // Insert anti-dependence edges from 'load' to each store 766 // in the non-early LCA block. 767 // Mine the non_early_stores list for such stores. 768 if (LCA->raise_LCA_mark() == load_index) { 769 while (non_early_stores.size() > 0) { 770 Node* store = non_early_stores.pop(); 771 Block* store_block = get_block_for_node(store); 772 if (store_block == LCA) { 773 // add anti_dependence from store to load in its own block 774 assert(store != load->in(0), "dependence cycle found"); 775 if (verify) { 776 assert(store->find_edge(load) != -1, "missing precedence edge"); 777 } else { 778 store->add_prec(load); 779 } 780 } else { 781 assert(store_block->raise_LCA_mark() == load_index, "block was marked"); 782 // Any other stores we found must be either inside the new LCA 783 // or else outside the original LCA. In the latter case, they 784 // did not interfere with any use of 'load'. 785 assert(LCA->dominates(store_block) 786 || !LCA_orig->dominates(store_block), "no stray stores"); 787 } 788 } 789 } 790 791 // Return the highest block containing stores; any stores 792 // within that block have been given anti-dependence edges. 793 return LCA; 794 } 795 796 // This class is used to iterate backwards over the nodes in the graph. 
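// A typical use of the iterator, as in compute_latencies_backwards() and
// schedule_late() below:
//
//     Node_Backward_Iterator iter((Node*)_root, visited, stack, *this);
//     Node* n;
//     while ((n = iter.next()) != NULL) {
//       // every use of n has already been returned on an earlier iteration
//     }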
797 798 class Node_Backward_Iterator { 799 800 private: 801 Node_Backward_Iterator(); 802 803 public: 804 // Constructor for the iterator 805 Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg); 806 807 // Postincrement operator to iterate over the nodes 808 Node *next(); 809 810 private: 811 VectorSet &_visited; 812 Node_List &_stack; 813 PhaseCFG &_cfg; 814 }; 815 816 // Constructor for the Node_Backward_Iterator 817 Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg) 818 : _visited(visited), _stack(stack), _cfg(cfg) { 819 // The stack should contain exactly the root 820 stack.clear(); 821 stack.push(root); 822 823 // Clear the visited bits 824 visited.Clear(); 825 } 826 827 // Iterator for the Node_Backward_Iterator 828 Node *Node_Backward_Iterator::next() { 829 830 // If the _stack is empty, then just return NULL: finished. 831 if ( !_stack.size() ) 832 return NULL; 833 834 // '_stack' is emulating a real _stack. The 'visit-all-users' loop has been 835 // made stateless, so I do not need to record the index 'i' on my _stack. 836 // Instead I visit all users each time, scanning for unvisited users. 837 // I visit unvisited not-anti-dependence users first, then anti-dependent 838 // children next. 839 Node *self = _stack.pop(); 840 841 // I cycle here when I am entering a deeper level of recursion. 842 // The key variable 'self' was set prior to jumping here. 843 while( 1 ) { 844 845 _visited.set(self->_idx); 846 847 // Now schedule all uses as late as possible. 848 const Node* src = self->is_Proj() ? self->in(0) : self; 849 uint src_rpo = _cfg.get_block_for_node(src)->_rpo; 850 851 // Schedule all nodes in a post-order visit 852 Node *unvisited = NULL; // Unvisited anti-dependent Node, if any 853 854 // Scan for unvisited nodes 855 for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) { 856 // For all uses, schedule late 857 Node* n = self->fast_out(i); // Use 858 859 // Skip already visited children 860 if ( _visited.test(n->_idx) ) 861 continue; 862 863 // do not traverse backward control edges 864 Node *use = n->is_Proj() ? n->in(0) : n; 865 uint use_rpo = _cfg.get_block_for_node(use)->_rpo; 866 867 if ( use_rpo < src_rpo ) 868 continue; 869 870 // Phi nodes always precede uses in a basic block 871 if ( use_rpo == src_rpo && use->is_Phi() ) 872 continue; 873 874 unvisited = n; // Found unvisited 875 876 // Check for possible-anti-dependent 877 if( !n->needs_anti_dependence_check() ) 878 break; // Not visited, not anti-dep; schedule it NOW 879 } 880 881 // Did I find an unvisited not-anti-dependent Node? 882 if ( !unvisited ) 883 break; // All done with children; post-visit 'self' 884 885 // Visit the unvisited Node. Contains the obvious push to 886 // indicate I'm entering a deeper level of recursion. I push the 887 // old state onto the _stack and set a new state and loop (recurse). 888 _stack.push(self); 889 self = unvisited; 890 } // End recursion loop 891 892 return self; 893 } 894 895 //------------------------------ComputeLatenciesBackwards---------------------- 896 // Compute the latency of all the instructions. 
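// The backward walk visits uses before their defs, so each node's latency is
// already known when its inputs are updated.  As a small worked example
// (hypothetical numbers): if a use has node_latency = 7 and the edge to
// input j contributes latency(j) = 3, the def feeding that input is raised
// to at least 7 + 3 = 10; a def with several uses keeps the maximum such
// value.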
897 void PhaseCFG::compute_latencies_backwards(VectorSet &visited, Node_List &stack) { 898 #ifndef PRODUCT 899 if (trace_opto_pipelining()) 900 tty->print("\n#---- ComputeLatenciesBackwards ----\n"); 901 #endif 902 903 Node_Backward_Iterator iter((Node *)_root, visited, stack, *this); 904 Node *n; 905 906 // Walk over all the nodes from last to first 907 while (n = iter.next()) { 908 // Set the latency for the definitions of this instruction 909 partial_latency_of_defs(n); 910 } 911 } // end ComputeLatenciesBackwards 912 913 //------------------------------partial_latency_of_defs------------------------ 914 // Compute the latency impact of this node on all defs. This computes 915 // a number that increases as we approach the beginning of the routine. 916 void PhaseCFG::partial_latency_of_defs(Node *n) { 917 // Set the latency for this instruction 918 #ifndef PRODUCT 919 if (trace_opto_pipelining()) { 920 tty->print("# latency_to_inputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n)); 921 dump(); 922 } 923 #endif 924 925 if (n->is_Proj()) { 926 n = n->in(0); 927 } 928 929 if (n->is_Root()) { 930 return; 931 } 932 933 uint nlen = n->len(); 934 uint use_latency = get_latency_for_node(n); 935 uint use_pre_order = get_block_for_node(n)->_pre_order; 936 937 for (uint j = 0; j < nlen; j++) { 938 Node *def = n->in(j); 939 940 if (!def || def == n) { 941 continue; 942 } 943 944 // Walk backwards thru projections 945 if (def->is_Proj()) { 946 def = def->in(0); 947 } 948 949 #ifndef PRODUCT 950 if (trace_opto_pipelining()) { 951 tty->print("# in(%2d): ", j); 952 def->dump(); 953 } 954 #endif 955 956 // If the defining block is not known, assume it is ok 957 Block *def_block = get_block_for_node(def); 958 uint def_pre_order = def_block ? def_block->_pre_order : 0; 959 960 if ((use_pre_order < def_pre_order) || (use_pre_order == def_pre_order && n->is_Phi())) { 961 continue; 962 } 963 964 uint delta_latency = n->latency(j); 965 uint current_latency = delta_latency + use_latency; 966 967 if (get_latency_for_node(def) < current_latency) { 968 set_latency_for_node(def, current_latency); 969 } 970 971 #ifndef PRODUCT 972 if (trace_opto_pipelining()) { 973 tty->print_cr("# %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", use_latency, j, delta_latency, current_latency, def->_idx, get_latency_for_node(def)); 974 } 975 #endif 976 } 977 } 978 979 //------------------------------latency_from_use------------------------------- 980 // Compute the latency of a specific use 981 int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) { 982 // If self-reference, return no latency 983 if (use == n || use->is_Root()) { 984 return 0; 985 } 986 987 uint def_pre_order = get_block_for_node(def)->_pre_order; 988 uint latency = 0; 989 990 // If the use is not a projection, then it is simple... 
991 if (!use->is_Proj()) { 992 #ifndef PRODUCT 993 if (trace_opto_pipelining()) { 994 tty->print("# out(): "); 995 use->dump(); 996 } 997 #endif 998 999 uint use_pre_order = get_block_for_node(use)->_pre_order; 1000 1001 if (use_pre_order < def_pre_order) 1002 return 0; 1003 1004 if (use_pre_order == def_pre_order && use->is_Phi()) 1005 return 0; 1006 1007 uint nlen = use->len(); 1008 uint nl = get_latency_for_node(use); 1009 1010 for ( uint j=0; j<nlen; j++ ) { 1011 if (use->in(j) == n) { 1012 // Change this if we want local latencies 1013 uint ul = use->latency(j); 1014 uint l = ul + nl; 1015 if (latency < l) latency = l; 1016 #ifndef PRODUCT 1017 if (trace_opto_pipelining()) { 1018 tty->print_cr("# %d + edge_latency(%d) == %d -> %d, latency = %d", 1019 nl, j, ul, l, latency); 1020 } 1021 #endif 1022 } 1023 } 1024 } else { 1025 // This is a projection, just grab the latency of the use(s) 1026 for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) { 1027 uint l = latency_from_use(use, def, use->fast_out(j)); 1028 if (latency < l) latency = l; 1029 } 1030 } 1031 1032 return latency; 1033 } 1034 1035 //------------------------------latency_from_uses------------------------------ 1036 // Compute the latency of this instruction relative to all of it's uses. 1037 // This computes a number that increases as we approach the beginning of the 1038 // routine. 1039 void PhaseCFG::latency_from_uses(Node *n) { 1040 // Set the latency for this instruction 1041 #ifndef PRODUCT 1042 if (trace_opto_pipelining()) { 1043 tty->print("# latency_from_outputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n)); 1044 dump(); 1045 } 1046 #endif 1047 uint latency=0; 1048 const Node *def = n->is_Proj() ? n->in(0): n; 1049 1050 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1051 uint l = latency_from_use(n, def, n->fast_out(i)); 1052 1053 if (latency < l) latency = l; 1054 } 1055 1056 set_latency_for_node(n, latency); 1057 } 1058 1059 //------------------------------hoist_to_cheaper_block------------------------- 1060 // Pick a block for node self, between early and LCA, that is a cheaper 1061 // alternative to LCA. 1062 Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) { 1063 const double delta = 1+PROB_UNLIKELY_MAG(4); 1064 Block* least = LCA; 1065 double least_freq = least->_freq; 1066 uint target = get_latency_for_node(self); 1067 uint start_latency = get_latency_for_node(LCA->head()); 1068 uint end_latency = get_latency_for_node(LCA->get_node(LCA->end_idx())); 1069 bool in_latency = (target <= start_latency); 1070 const Block* root_block = get_block_for_node(_root); 1071 1072 // Turn off latency scheduling if scheduling is just plain off 1073 if (!C->do_scheduling()) 1074 in_latency = true; 1075 1076 // Do not hoist (to cover latency) instructions which target a 1077 // single register. Hoisting stretches the live range of the 1078 // single register and may force spilling. 1079 MachNode* mach = self->is_Mach() ? 
self->as_Mach() : NULL; 1080 if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty()) 1081 in_latency = true; 1082 1083 #ifndef PRODUCT 1084 if (trace_opto_pipelining()) { 1085 tty->print("# Find cheaper block for latency %d: ", get_latency_for_node(self)); 1086 self->dump(); 1087 tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g", 1088 LCA->_pre_order, 1089 LCA->head()->_idx, 1090 start_latency, 1091 LCA->get_node(LCA->end_idx())->_idx, 1092 end_latency, 1093 least_freq); 1094 } 1095 #endif 1096 1097 int cand_cnt = 0; // number of candidates tried 1098 1099 // Walk up the dominator tree from LCA (Lowest common ancestor) to 1100 // the earliest legal location. Capture the least execution frequency. 1101 while (LCA != early) { 1102 LCA = LCA->_idom; // Follow up the dominator tree 1103 1104 if (LCA == NULL) { 1105 // Bailout without retry 1106 assert(false, "graph should be schedulable"); 1107 C->record_method_not_compilable("late schedule failed: LCA == NULL"); 1108 return least; 1109 } 1110 1111 // Don't hoist machine instructions to the root basic block 1112 if (mach && LCA == root_block) 1113 break; 1114 1115 uint start_lat = get_latency_for_node(LCA->head()); 1116 uint end_idx = LCA->end_idx(); 1117 uint end_lat = get_latency_for_node(LCA->get_node(end_idx)); 1118 double LCA_freq = LCA->_freq; 1119 #ifndef PRODUCT 1120 if (trace_opto_pipelining()) { 1121 tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g", 1122 LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq); 1123 } 1124 #endif 1125 cand_cnt++; 1126 if (LCA_freq < least_freq || // Better Frequency 1127 (StressGCM && Compile::randomized_select(cand_cnt)) || // Should be randomly accepted in stress mode 1128 (!StressGCM && // Otherwise, choose with latency 1129 !in_latency && // No block containing latency 1130 LCA_freq < least_freq * delta && // No worse frequency 1131 target >= end_lat && // within latency range 1132 !self->is_iteratively_computed() ) // But don't hoist IV increments 1133 // because they may end up above other uses of their phi forcing 1134 // their result register to be different from their input. 1135 ) { 1136 least = LCA; // Found cheaper block 1137 least_freq = LCA_freq; 1138 start_latency = start_lat; 1139 end_latency = end_lat; 1140 if (target <= start_lat) 1141 in_latency = true; 1142 } 1143 } 1144 1145 #ifndef PRODUCT 1146 if (trace_opto_pipelining()) { 1147 tty->print_cr("# Choose block B%d with start latency=%d and freq=%g", 1148 least->_pre_order, start_latency, least_freq); 1149 } 1150 #endif 1151 1152 // See if the latency needs to be updated 1153 if (target < end_latency) { 1154 #ifndef PRODUCT 1155 if (trace_opto_pipelining()) { 1156 tty->print_cr("# Change latency for [%4d] from %d to %d", self->_idx, target, end_latency); 1157 } 1158 #endif 1159 set_latency_for_node(self, end_latency); 1160 partial_latency_of_defs(self); 1161 } 1162 1163 return least; 1164 } 1165 1166 1167 //------------------------------schedule_late----------------------------------- 1168 // Now schedule all codes as LATE as possible. This is the LCA in the 1169 // dominator tree of all USES of a value. Pick the block with the least 1170 // loop nesting depth that is lowest in the dominator tree. 
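// For illustration (hypothetical example), suppose a value is computed before
// an 'if' but used only on a rarely taken branch:
//
//     x = a * b;       // early block: above the If
//     if (cond) {      // infrequent branch
//       use(x);
//     }
//
// schedule_late sinks the multiply toward the LCA of its uses (the branch
// block), unless hoist_to_cheaper_block() below finds a dominating block
// between 'early' and that LCA with a lower execution frequency.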
1171 extern const char must_clone[]; 1172 void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) { 1173 #ifndef PRODUCT 1174 if (trace_opto_pipelining()) 1175 tty->print("\n#---- schedule_late ----\n"); 1176 #endif 1177 1178 Node_Backward_Iterator iter((Node *)_root, visited, stack, *this); 1179 Node *self; 1180 1181 // Walk over all the nodes from last to first 1182 while (self = iter.next()) { 1183 Block* early = get_block_for_node(self); // Earliest legal placement 1184 1185 if (self->is_top()) { 1186 // Top node goes in bb #2 with other constants. 1187 // It must be special-cased, because it has no out edges. 1188 early->add_inst(self); 1189 continue; 1190 } 1191 1192 // No uses, just terminate 1193 if (self->outcnt() == 0) { 1194 assert(self->is_MachProj(), "sanity"); 1195 continue; // Must be a dead machine projection 1196 } 1197 1198 // If node is pinned in the block, then no scheduling can be done. 1199 if( self->pinned() ) // Pinned in block? 1200 continue; 1201 1202 MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL; 1203 if (mach) { 1204 switch (mach->ideal_Opcode()) { 1205 case Op_CreateEx: 1206 // Don't move exception creation 1207 early->add_inst(self); 1208 continue; 1209 break; 1210 case Op_CheckCastPP: 1211 // Don't move CheckCastPP nodes away from their input, if the input 1212 // is a rawptr (5071820). 1213 Node *def = self->in(1); 1214 if (def != NULL && def->bottom_type()->base() == Type::RawPtr) { 1215 early->add_inst(self); 1216 #ifdef ASSERT 1217 _raw_oops.push(def); 1218 #endif 1219 continue; 1220 } 1221 break; 1222 } 1223 } 1224 1225 // Gather LCA of all uses 1226 Block *LCA = NULL; 1227 { 1228 for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) { 1229 // For all uses, find LCA 1230 Node* use = self->fast_out(i); 1231 LCA = raise_LCA_above_use(LCA, use, self, this); 1232 } 1233 } // (Hide defs of imax, i from rest of block.) 1234 1235 // Place temps in the block of their use. This isn't a 1236 // requirement for correctness but it reduces useless 1237 // interference between temps and other nodes. 1238 if (mach != NULL && mach->is_MachTemp()) { 1239 map_node_to_block(self, LCA); 1240 LCA->add_inst(self); 1241 continue; 1242 } 1243 1244 // Check if 'self' could be anti-dependent on memory 1245 if (self->needs_anti_dependence_check()) { 1246 // Hoist LCA above possible-defs and insert anti-dependences to 1247 // defs in new LCA block. 1248 LCA = insert_anti_dependences(LCA, self); 1249 } 1250 1251 if (early->_dom_depth > LCA->_dom_depth) { 1252 // Somehow the LCA has moved above the earliest legal point. 1253 // (One way this can happen is via memory_early_block.) 1254 if (C->subsume_loads() == true && !C->failing()) { 1255 // Retry with subsume_loads == false 1256 // If this is the first failure, the sentinel string will "stick" 1257 // to the Compile object, and the C2Compiler will see it and retry. 1258 C->record_failure(C2Compiler::retry_no_subsuming_loads()); 1259 } else { 1260 // Bailout without retry when (early->_dom_depth > LCA->_dom_depth) 1261 assert(false, "graph should be schedulable"); 1262 C->record_method_not_compilable("late schedule failed: incorrect graph"); 1263 } 1264 return; 1265 } 1266 1267 // If there is no opportunity to hoist, then we're done. 1268 // In stress mode, try to hoist even the single operations. 1269 bool try_to_hoist = StressGCM || (LCA != early); 1270 1271 // Must clone guys stay next to use; no hoisting allowed. 
1272 // Also cannot hoist guys that alter memory or are otherwise not 1273 // allocatable (hoisting can make a value live longer, leading to 1274 // anti and output dependency problems which are normally resolved 1275 // by the register allocator giving everyone a different register). 1276 if (mach != NULL && must_clone[mach->ideal_Opcode()]) 1277 try_to_hoist = false; 1278 1279 Block* late = NULL; 1280 if (try_to_hoist) { 1281 // Now find the block with the least execution frequency. 1282 // Start at the latest schedule and work up to the earliest schedule 1283 // in the dominator tree. Thus the Node will dominate all its uses. 1284 late = hoist_to_cheaper_block(LCA, early, self); 1285 } else { 1286 // Just use the LCA of the uses. 1287 late = LCA; 1288 } 1289 1290 // Put the node into target block 1291 schedule_node_into_block(self, late); 1292 1293 #ifdef ASSERT 1294 if (self->needs_anti_dependence_check()) { 1295 // since precedence edges are only inserted when we're sure they 1296 // are needed make sure that after placement in a block we don't 1297 // need any new precedence edges. 1298 verify_anti_dependences(late, self); 1299 } 1300 #endif 1301 } // Loop until all nodes have been visited 1302 1303 } // end ScheduleLate 1304 1305 //------------------------------GlobalCodeMotion------------------------------- 1306 void PhaseCFG::global_code_motion() { 1307 ResourceMark rm; 1308 1309 #ifndef PRODUCT 1310 if (trace_opto_pipelining()) { 1311 tty->print("\n---- Start GlobalCodeMotion ----\n"); 1312 } 1313 #endif 1314 1315 // Initialize the node to block mapping for things on the proj_list 1316 for (uint i = 0; i < _matcher.number_of_projections(); i++) { 1317 unmap_node_from_block(_matcher.get_projection(i)); 1318 } 1319 1320 // Set the basic block for Nodes pinned into blocks 1321 Arena* arena = Thread::current()->resource_area(); 1322 VectorSet visited(arena); 1323 schedule_pinned_nodes(visited); 1324 1325 // Find the earliest Block any instruction can be placed in. Some 1326 // instructions are pinned into Blocks. Unpinned instructions can 1327 // appear in last block in which all their inputs occur. 1328 visited.Clear(); 1329 Node_List stack(arena); 1330 // Pre-grow the list 1331 stack.map((C->live_nodes() >> 1) + 16, NULL); 1332 if (!schedule_early(visited, stack)) { 1333 // Bailout without retry 1334 C->record_method_not_compilable("early schedule failed"); 1335 return; 1336 } 1337 1338 // Build Def-Use edges. 1339 // Compute the latency information (via backwards walk) for all the 1340 // instructions in the graph 1341 _node_latency = new GrowableArray<uint>(); // resource_area allocation 1342 1343 if (C->do_scheduling()) { 1344 compute_latencies_backwards(visited, stack); 1345 } 1346 1347 // Now schedule all codes as LATE as possible. This is the LCA in the 1348 // dominator tree of all USES of a value. Pick the block with the least 1349 // loop nesting depth that is lowest in the dominator tree. 1350 // ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() ) 1351 schedule_late(visited, stack); 1352 if (C->failing()) { 1353 return; 1354 } 1355 1356 #ifndef PRODUCT 1357 if (trace_opto_pipelining()) { 1358 tty->print("\n---- Detect implicit null checks ----\n"); 1359 } 1360 #endif 1361 1362 // Detect implicit-null-check opportunities. Basically, find NULL checks 1363 // with suitable memory ops nearby. Use the memory op to do the NULL check. 1364 // I can generate a memory op if there is not one nearby. 
1365 if (C->is_method_compilation()) { 1366 // By reversing the loop direction we get a very minor gain on mpegaudio. 1367 // Feel free to revert to a forward loop for clarity. 1368 // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) { 1369 for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) { 1370 Node* proj = _matcher._null_check_tests[i]; 1371 Node* val = _matcher._null_check_tests[i + 1]; 1372 Block* block = get_block_for_node(proj); 1373 implicit_null_check(block, proj, val, C->allowed_deopt_reasons()); 1374 // The implicit_null_check will only perform the transformation 1375 // if the null branch is truly uncommon, *and* it leads to an 1376 // uncommon trap. Combined with the too_many_traps guards 1377 // above, this prevents SEGV storms reported in 6366351, 1378 // by recompiling offending methods without this optimization. 1379 } 1380 } 1381 1382 #ifndef PRODUCT 1383 if (trace_opto_pipelining()) { 1384 tty->print("\n---- Start Local Scheduling ----\n"); 1385 } 1386 #endif 1387 1388 // Schedule locally. Right now a simple topological sort. 1389 // Later, do a real latency aware scheduler. 1390 GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1); 1391 visited.Clear(); 1392 for (uint i = 0; i < number_of_blocks(); i++) { 1393 Block* block = get_block(i); 1394 if (!schedule_local(block, ready_cnt, visited)) { 1395 if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) { 1396 C->record_method_not_compilable("local schedule failed"); 1397 } 1398 return; 1399 } 1400 } 1401 1402 // If we inserted any instructions between a Call and his CatchNode, 1403 // clone the instructions on all paths below the Catch. 1404 for (uint i = 0; i < number_of_blocks(); i++) { 1405 Block* block = get_block(i); 1406 call_catch_cleanup(block); 1407 } 1408 1409 #ifndef PRODUCT 1410 if (trace_opto_pipelining()) { 1411 tty->print("\n---- After GlobalCodeMotion ----\n"); 1412 for (uint i = 0; i < number_of_blocks(); i++) { 1413 Block* block = get_block(i); 1414 block->dump(); 1415 } 1416 } 1417 #endif 1418 // Dead. 1419 _node_latency = (GrowableArray<uint> *)((intptr_t)0xdeadbeef); 1420 } 1421 1422 bool PhaseCFG::do_global_code_motion() { 1423 1424 build_dominator_tree(); 1425 if (C->failing()) { 1426 return false; 1427 } 1428 1429 NOT_PRODUCT( C->verify_graph_edges(); ) 1430 1431 estimate_block_frequency(); 1432 1433 global_code_motion(); 1434 1435 if (C->failing()) { 1436 return false; 1437 } 1438 1439 return true; 1440 } 1441 1442 //------------------------------Estimate_Block_Frequency----------------------- 1443 // Estimate block frequencies based on IfNode probabilities. 1444 void PhaseCFG::estimate_block_frequency() { 1445 1446 // Force conditional branches leading to uncommon traps to be unlikely, 1447 // not because we get to the uncommon_trap with less relative frequency, 1448 // but because an uncommon_trap typically causes a deopt, so we only get 1449 // there once. 
1450 if (C->do_freq_based_layout()) { 1451 Block_List worklist; 1452 Block* root_blk = get_block(0); 1453 for (uint i = 1; i < root_blk->num_preds(); i++) { 1454 Block *pb = get_block_for_node(root_blk->pred(i)); 1455 if (pb->has_uncommon_code()) { 1456 worklist.push(pb); 1457 } 1458 } 1459 while (worklist.size() > 0) { 1460 Block* uct = worklist.pop(); 1461 if (uct == get_root_block()) { 1462 continue; 1463 } 1464 for (uint i = 1; i < uct->num_preds(); i++) { 1465 Block *pb = get_block_for_node(uct->pred(i)); 1466 if (pb->_num_succs == 1) { 1467 worklist.push(pb); 1468 } else if (pb->num_fall_throughs() == 2) { 1469 pb->update_uncommon_branch(uct); 1470 } 1471 } 1472 } 1473 } 1474 1475 // Create the loop tree and calculate loop depth. 1476 _root_loop = create_loop_tree(); 1477 _root_loop->compute_loop_depth(0); 1478 1479 // Compute block frequency of each block, relative to a single loop entry. 1480 _root_loop->compute_freq(); 1481 1482 // Adjust all frequencies to be relative to a single method entry 1483 _root_loop->_freq = 1.0; 1484 _root_loop->scale_freq(); 1485 1486 // Save outmost loop frequency for LRG frequency threshold 1487 _outer_loop_frequency = _root_loop->outer_loop_freq(); 1488 1489 // force paths ending at uncommon traps to be infrequent 1490 if (!C->do_freq_based_layout()) { 1491 Block_List worklist; 1492 Block* root_blk = get_block(0); 1493 for (uint i = 1; i < root_blk->num_preds(); i++) { 1494 Block *pb = get_block_for_node(root_blk->pred(i)); 1495 if (pb->has_uncommon_code()) { 1496 worklist.push(pb); 1497 } 1498 } 1499 while (worklist.size() > 0) { 1500 Block* uct = worklist.pop(); 1501 uct->_freq = PROB_MIN; 1502 for (uint i = 1; i < uct->num_preds(); i++) { 1503 Block *pb = get_block_for_node(uct->pred(i)); 1504 if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) { 1505 worklist.push(pb); 1506 } 1507 } 1508 } 1509 } 1510 1511 #ifdef ASSERT 1512 for (uint i = 0; i < number_of_blocks(); i++) { 1513 Block* b = get_block(i); 1514 assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency"); 1515 } 1516 #endif 1517 1518 #ifndef PRODUCT 1519 if (PrintCFGBlockFreq) { 1520 tty->print_cr("CFG Block Frequencies"); 1521 _root_loop->dump_tree(); 1522 if (Verbose) { 1523 tty->print_cr("PhaseCFG dump"); 1524 dump(); 1525 tty->print_cr("Node dump"); 1526 _root->dump(99999); 1527 } 1528 } 1529 #endif 1530 } 1531 1532 //----------------------------create_loop_tree-------------------------------- 1533 // Create a loop tree from the CFG 1534 CFGLoop* PhaseCFG::create_loop_tree() { 1535 1536 #ifdef ASSERT 1537 assert(get_block(0) == get_root_block(), "first block should be root block"); 1538 for (uint i = 0; i < number_of_blocks(); i++) { 1539 Block* block = get_block(i); 1540 // Check that _loop field are clear...we could clear them if not. 1541 assert(block->_loop == NULL, "clear _loop expected"); 1542 // Sanity check that the RPO numbering is reflected in the _blocks array. 
1543 // It doesn't have to be for the loop tree to be built, but if it is not, 1544 // then the blocks have been reordered since dom graph building...which 1545 // may question the RPO numbering 1546 assert(block->_rpo == i, "unexpected reverse post order number"); 1547 } 1548 #endif 1549 1550 int idct = 0; 1551 CFGLoop* root_loop = new CFGLoop(idct++); 1552 1553 Block_List worklist; 1554 1555 // Assign blocks to loops 1556 for(uint i = number_of_blocks() - 1; i > 0; i-- ) { // skip Root block 1557 Block* block = get_block(i); 1558 1559 if (block->head()->is_Loop()) { 1560 Block* loop_head = block; 1561 assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors"); 1562 Node* tail_n = loop_head->pred(LoopNode::LoopBackControl); 1563 Block* tail = get_block_for_node(tail_n); 1564 1565 // Defensively filter out Loop nodes for non-single-entry loops. 1566 // For all reasonable loops, the head occurs before the tail in RPO. 1567 if (i <= tail->_rpo) { 1568 1569 // The tail and (recursive) predecessors of the tail 1570 // are made members of a new loop. 1571 1572 assert(worklist.size() == 0, "nonempty worklist"); 1573 CFGLoop* nloop = new CFGLoop(idct++); 1574 assert(loop_head->_loop == NULL, "just checking"); 1575 loop_head->_loop = nloop; 1576 // Add to nloop so push_pred() will skip over inner loops 1577 nloop->add_member(loop_head); 1578 nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this); 1579 1580 while (worklist.size() > 0) { 1581 Block* member = worklist.pop(); 1582 if (member != loop_head) { 1583 for (uint j = 1; j < member->num_preds(); j++) { 1584 nloop->push_pred(member, j, worklist, this); 1585 } 1586 } 1587 } 1588 } 1589 } 1590 } 1591 1592 // Create a member list for each loop consisting 1593 // of both blocks and (immediate child) loops. 1594 for (uint i = 0; i < number_of_blocks(); i++) { 1595 Block* block = get_block(i); 1596 CFGLoop* lp = block->_loop; 1597 if (lp == NULL) { 1598 // Not assigned to a loop. Add it to the method's pseudo loop. 1599 block->_loop = root_loop; 1600 lp = root_loop; 1601 } 1602 if (lp == root_loop || block != lp->head()) { // loop heads are already members 1603 lp->add_member(block); 1604 } 1605 if (lp != root_loop) { 1606 if (lp->parent() == NULL) { 1607 // Not a nested loop. Make it a child of the method's pseudo loop. 1608 root_loop->add_nested_loop(lp); 1609 } 1610 if (block == lp->head()) { 1611 // Add nested loop to member list of parent loop. 1612 lp->parent()->add_member(lp); 1613 } 1614 } 1615 } 1616 1617 return root_loop; 1618 } 1619 1620 //------------------------------push_pred-------------------------------------- 1621 void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) { 1622 Node* pred_n = blk->pred(i); 1623 Block* pred = cfg->get_block_for_node(pred_n); 1624 CFGLoop *pred_loop = pred->_loop; 1625 if (pred_loop == NULL) { 1626 // Filter out blocks for non-single-entry loops. 1627 // For all reasonable loops, the head occurs before the tail in RPO. 1628 if (pred->_rpo > head()->_rpo) { 1629 pred->_loop = this; 1630 worklist.push(pred); 1631 } 1632 } else if (pred_loop != this) { 1633 // Nested loop. 1634 while (pred_loop->_parent != NULL && pred_loop->_parent != this) { 1635 pred_loop = pred_loop->_parent; 1636 } 1637 // Make pred's loop be a child 1638 if (pred_loop->_parent == NULL) { 1639 add_nested_loop(pred_loop); 1640 // Continue with loop entry predecessor. 
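        // (Clarifying note, added for illustration: the predecessor found here
        // lies inside an already-built inner loop. That loop is treated as a
        // single unit, so instead of walking its body the search resumes at
        // the edge that enters it, i.e. the inner loop head's EntryControl
        // predecessor.)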
1641 Block* pred_head = pred_loop->head(); 1642 assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors"); 1643 assert(pred_head != head(), "loop head in only one loop"); 1644 push_pred(pred_head, LoopNode::EntryControl, worklist, cfg); 1645 } else { 1646 assert(pred_loop->_parent == this && _parent == NULL, "just checking"); 1647 } 1648 } 1649 } 1650 1651 //------------------------------add_nested_loop-------------------------------- 1652 // Make cl a child of the current loop in the loop tree. 1653 void CFGLoop::add_nested_loop(CFGLoop* cl) { 1654 assert(_parent == NULL, "no parent yet"); 1655 assert(cl != this, "not my own parent"); 1656 cl->_parent = this; 1657 CFGLoop* ch = _child; 1658 if (ch == NULL) { 1659 _child = cl; 1660 } else { 1661 while (ch->_sibling != NULL) { ch = ch->_sibling; } 1662 ch->_sibling = cl; 1663 } 1664 } 1665 1666 //------------------------------compute_loop_depth----------------------------- 1667 // Store the loop depth in each CFGLoop object. 1668 // Recursively walk the children to do the same for them. 1669 void CFGLoop::compute_loop_depth(int depth) { 1670 _depth = depth; 1671 CFGLoop* ch = _child; 1672 while (ch != NULL) { 1673 ch->compute_loop_depth(depth + 1); 1674 ch = ch->_sibling; 1675 } 1676 } 1677 1678 //------------------------------compute_freq----------------------------------- 1679 // Compute the frequency of each block and loop, relative to a single entry 1680 // into the dominating loop head. 1681 void CFGLoop::compute_freq() { 1682 // Bottom up traversal of loop tree (visit inner loops first.) 1683 // Set loop head frequency to 1.0, then transitively 1684 // compute frequency for all successors in the loop, 1685 // as well as for each exit edge. Inner loops are 1686 // treated as single blocks with loop exit targets 1687 // as the successor blocks. 1688 1689 // Nested loops first 1690 CFGLoop* ch = _child; 1691 while (ch != NULL) { 1692 ch->compute_freq(); 1693 ch = ch->_sibling; 1694 } 1695 assert (_members.length() > 0, "no empty loops"); 1696 Block* hd = head(); 1697 hd->_freq = 1.0f; 1698 for (int i = 0; i < _members.length(); i++) { 1699 CFGElement* s = _members.at(i); 1700 float freq = s->_freq; 1701 if (s->is_block()) { 1702 Block* b = s->as_Block(); 1703 for (uint j = 0; j < b->_num_succs; j++) { 1704 Block* sb = b->_succs[j]; 1705 update_succ_freq(sb, freq * b->succ_prob(j)); 1706 } 1707 } else { 1708 CFGLoop* lp = s->as_CFGLoop(); 1709 assert(lp->_parent == this, "immediate child"); 1710 for (int k = 0; k < lp->_exits.length(); k++) { 1711 Block* eb = lp->_exits.at(k).get_target(); 1712 float prob = lp->_exits.at(k).get_prob(); 1713 update_succ_freq(eb, freq * prob); 1714 } 1715 } 1716 } 1717 1718 // For all loops other than the outer, "method" loop, 1719 // sum and normalize the exit probability. The "method" loop 1720 // should keep the initial exit probability of 1, so that 1721 // inner blocks do not get erroneously scaled. 1722 if (_depth != 0) { 1723 // Total the exit probabilities for this loop. 1724 float exits_sum = 0.0f; 1725 for (int i = 0; i < _exits.length(); i++) { 1726 exits_sum += _exits.at(i).get_prob(); 1727 } 1728 1729 // Normalize the exit probabilities. Until now, the 1730 // probabilities estimate the possibility of exit per 1731 // a single loop iteration; afterward, they estimate 1732 // the probability of exit per loop entry. 
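    // Illustrative example with made-up numbers: a loop with two exits whose
    // per-iteration probabilities are 0.05 and 0.15 has exits_sum == 0.2;
    // after the normalization below the exits carry 0.25 and 0.75 of the
    // loop's entry frequency, and the saved _exit_prob of 0.2 corresponds to
    // an estimated trip count of roughly 1/0.2 == 5, used later by scale_freq().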
1733 for (int i = 0; i < _exits.length(); i++) { 1734 Block* et = _exits.at(i).get_target(); 1735 float new_prob = 0.0f; 1736 if (_exits.at(i).get_prob() > 0.0f) { 1737 new_prob = _exits.at(i).get_prob() / exits_sum; 1738 } 1739 BlockProbPair bpp(et, new_prob); 1740 _exits.at_put(i, bpp); 1741 } 1742 1743 // Save the total, but guard against unreasonable probability, 1744 // as the value is used to estimate the loop trip count. 1745 // An infinite trip count would blur relative block 1746 // frequencies. 1747 if (exits_sum > 1.0f) exits_sum = 1.0; 1748 if (exits_sum < PROB_MIN) exits_sum = PROB_MIN; 1749 _exit_prob = exits_sum; 1750 } 1751 } 1752 1753 //------------------------------succ_prob------------------------------------- 1754 // Determine the probability of reaching successor 'i' from the receiver block. 1755 float Block::succ_prob(uint i) { 1756 int eidx = end_idx(); 1757 Node *n = get_node(eidx); // Get ending Node 1758 1759 int op = n->Opcode(); 1760 if (n->is_Mach()) { 1761 if (n->is_MachNullCheck()) { 1762 // Can only reach here if called after lcm. The original Op_If is gone, 1763 // so we attempt to infer the probability from one or both of the 1764 // successor blocks. 1765 assert(_num_succs == 2, "expecting 2 successors of a null check"); 1766 // If either successor has only one predecessor, then the 1767 // probability estimate can be derived using the 1768 // relative frequency of the successor and this block. 1769 if (_succs[i]->num_preds() == 2) { 1770 return _succs[i]->_freq / _freq; 1771 } else if (_succs[1-i]->num_preds() == 2) { 1772 return 1 - (_succs[1-i]->_freq / _freq); 1773 } else { 1774 // Estimate using both successor frequencies 1775 float freq = _succs[i]->_freq; 1776 return freq / (freq + _succs[1-i]->_freq); 1777 } 1778 } 1779 op = n->as_Mach()->ideal_Opcode(); 1780 } 1781 1782 1783 // Switch on branch type 1784 switch( op ) { 1785 case Op_CountedLoopEnd: 1786 case Op_If: { 1787 assert (i < 2, "just checking"); 1788 // Conditionals pass on only part of their frequency 1789 float prob = n->as_MachIf()->_prob; 1790 assert(prob >= 0.0 && prob <= 1.0, "out of range probability"); 1791 // If succ[i] is the FALSE branch, invert path info 1792 if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) { 1793 return 1.0f - prob; // not taken 1794 } else { 1795 return prob; // taken 1796 } 1797 } 1798 1799 case Op_Jump: 1800 // Divide the frequency between all successors evenly 1801 return 1.0f/_num_succs; 1802 1803 case Op_Catch: { 1804 const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj(); 1805 if (ci->_con == CatchProjNode::fall_through_index) { 1806 // Fall-thru path gets the lion's share. 
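      // (For example, with PROB_UNLIKELY_MAG(5) on the order of 1e-5, even a
      // block with several exception successors keeps essentially all of its
      // frequency on the fall-through path.)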
1807       return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
1808     } else {
1809       // Presume exceptional paths are equally unlikely
1810       return PROB_UNLIKELY_MAG(5);
1811     }
1812   }
1813
1814   case Op_Root:
1815   case Op_Goto:
1816     // Pass frequency straight thru to target
1817     return 1.0f;
1818
1819   case Op_NeverBranch:
1820     return 0.0f;
1821
1822   case Op_TailCall:
1823   case Op_TailJump:
1824   case Op_Return:
1825   case Op_Halt:
1826   case Op_Rethrow:
1827     // Do not push out freq to root block
1828     return 0.0f;
1829
1830   default:
1831     ShouldNotReachHere();
1832   }
1833
1834   return 0.0f;
1835 }
1836
1837 //------------------------------num_fall_throughs-----------------------------
1838 // Return the number of fall-through candidates for a block.
1839 int Block::num_fall_throughs() {
1840   int eidx = end_idx();
1841   Node *n = get_node(eidx); // Get ending Node
1842
1843   int op = n->Opcode();
1844   if (n->is_Mach()) {
1845     if (n->is_MachNullCheck()) {
1846       // In theory, either side can fall through; for simplicity's sake,
1847       // let's say only the false branch can for now.
1848       return 1;
1849     }
1850     op = n->as_Mach()->ideal_Opcode();
1851   }
1852
1853   // Switch on branch type
1854   switch( op ) {
1855   case Op_CountedLoopEnd:
1856   case Op_If:
1857     return 2;
1858
1859   case Op_Root:
1860   case Op_Goto:
1861     return 1;
1862
1863   case Op_Catch: {
1864     for (uint i = 0; i < _num_succs; i++) {
1865       const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
1866       if (ci->_con == CatchProjNode::fall_through_index) {
1867         return 1;
1868       }
1869     }
1870     return 0;
1871   }
1872
1873   case Op_Jump:
1874   case Op_NeverBranch:
1875   case Op_TailCall:
1876   case Op_TailJump:
1877   case Op_Return:
1878   case Op_Halt:
1879   case Op_Rethrow:
1880     return 0;
1881
1882   default:
1883     ShouldNotReachHere();
1884   }
1885
1886   return 0;
1887 }
1888
1889 //------------------------------succ_fall_through-----------------------------
1890 // Return true if a specific successor could be a fall-through target.
1891 bool Block::succ_fall_through(uint i) {
1892   int eidx = end_idx();
1893   Node *n = get_node(eidx); // Get ending Node
1894
1895   int op = n->Opcode();
1896   if (n->is_Mach()) {
1897     if (n->is_MachNullCheck()) {
1898       // In theory, either side can fall through; for simplicity's sake,
1899       // let's say only the false branch can for now.
1900       return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
1901     }
1902     op = n->as_Mach()->ideal_Opcode();
1903   }
1904
1905   // Switch on branch type
1906   switch( op ) {
1907   case Op_CountedLoopEnd:
1908   case Op_If:
1909   case Op_Root:
1910   case Op_Goto:
1911     return true;
1912
1913   case Op_Catch: {
1914     const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
1915     return ci->_con == CatchProjNode::fall_through_index;
1916   }
1917
1918   case Op_Jump:
1919   case Op_NeverBranch:
1920   case Op_TailCall:
1921   case Op_TailJump:
1922   case Op_Return:
1923   case Op_Halt:
1924   case Op_Rethrow:
1925     return false;
1926
1927   default:
1928     ShouldNotReachHere();
1929   }
1930
1931   return false;
1932 }
1933
1934 //------------------------------update_uncommon_branch------------------------
1935 // Update the probability of a two-way branch to be uncommon
1936 void Block::update_uncommon_branch(Block* ub) {
1937   int eidx = end_idx();
1938   Node *n = get_node(eidx); // Get ending Node
1939
1940   int op = n->as_Mach()->ideal_Opcode();
1941
1942   assert(op == Op_CountedLoopEnd || op == Op_If, "must be an If");
1943   assert(num_fall_throughs() == 2, "must be a two-way branch block");
1944
1945   // Which successor is ub?
1946   uint s;
1947   for (s = 0; s < _num_succs; s++) {
1948     if (_succs[s] == ub) break;
1949   }
1950   assert(s < 2, "uncommon successor must be found");
1951
1952   // If ub is the true path, make the probability small; otherwise ub is the
1953   // false path, and make the probability large.
1954   bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);
1955
1956   // Get existing probability
1957   float p = n->as_MachIf()->_prob;
1958
1959   if (invert) p = 1.0 - p;
1960   if (p > PROB_MIN) {
1961     p = PROB_MIN;
1962   }
1963   if (invert) p = 1.0 - p;
1964
1965   n->as_MachIf()->_prob = p;
1966 }
1967
1968 //------------------------------update_succ_freq-------------------------------
1969 // Update the appropriate frequency associated with block 'b', a successor of
1970 // a block in this loop.
1971 void CFGLoop::update_succ_freq(Block* b, float freq) {
1972   if (b->_loop == this) {
1973     if (b == head()) {
1974       // back branch within the loop
1975       // Do nothing now; the loop-carried frequency will be
1976       // adjusted later in scale_freq().
1977     } else {
1978       // simple branch within the loop
1979       b->_freq += freq;
1980     }
1981   } else if (!in_loop_nest(b)) {
1982     // branch is exit from this loop
1983     BlockProbPair bpp(b, freq);
1984     _exits.append(bpp);
1985   } else {
1986     // branch into nested loop
1987     CFGLoop* ch = b->_loop;
1988     ch->_freq += freq;
1989   }
1990 }
1991
1992 //------------------------------in_loop_nest-----------------------------------
1993 // Determine if block b is in the receiver's loop nest.
1994 bool CFGLoop::in_loop_nest(Block* b) {
1995   int depth = _depth;
1996   CFGLoop* b_loop = b->_loop;
1997   int b_depth = b_loop->_depth;
1998   if (depth == b_depth) {
1999     return true;
2000   }
2001   while (b_depth > depth) {
2002     b_loop = b_loop->_parent;
2003     b_depth = b_loop->_depth;
2004   }
2005   return b_loop == this;
2006 }
2007
2008 //------------------------------scale_freq-------------------------------------
2009 // Scale frequency of loops and blocks by trip counts from outer loops.
2010 // Do a top-down traversal of the loop tree (visit outer loops first).
2011 void CFGLoop::scale_freq() {
2012   float loop_freq = _freq * trip_count();
2013   _freq = loop_freq;
2014   for (int i = 0; i < _members.length(); i++) {
2015     CFGElement* s = _members.at(i);
2016     float block_freq = s->_freq * loop_freq;
2017     if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
2018       block_freq = MIN_BLOCK_FREQUENCY;
2019     s->_freq = block_freq;
2020   }
2021   CFGLoop* ch = _child;
2022   while (ch != NULL) {
2023     ch->scale_freq();
2024     ch = ch->_sibling;
2025   }
2026 }
2027
2028 // Frequency of outer loop
2029 float CFGLoop::outer_loop_freq() const {
2030   if (_child != NULL) {
2031     return _child->_freq;
2032   }
2033   return _freq;
2034 }
2035
2036 #ifndef PRODUCT
2037 //------------------------------dump_tree--------------------------------------
2038 void CFGLoop::dump_tree() const {
2039   dump();
2040   if (_child != NULL) _child->dump_tree();
2041   if (_sibling != NULL) _sibling->dump_tree();
2042 }
2043
2044 //------------------------------dump-------------------------------------------
2045 void CFGLoop::dump() const {
2046   for (int i = 0; i < _depth; i++) tty->print(" ");
2047   tty->print("%s: %d trip_count: %6.0f freq: %6.0f\n",
2048              _depth == 0 ?
"Method" : "Loop", _id, trip_count(), _freq); 2049 for (int i = 0; i < _depth; i++) tty->print(" "); 2050 tty->print(" members:"); 2051 int k = 0; 2052 for (int i = 0; i < _members.length(); i++) { 2053 if (k++ >= 6) { 2054 tty->print("\n "); 2055 for (int j = 0; j < _depth+1; j++) tty->print(" "); 2056 k = 0; 2057 } 2058 CFGElement *s = _members.at(i); 2059 if (s->is_block()) { 2060 Block *b = s->as_Block(); 2061 tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq); 2062 } else { 2063 CFGLoop* lp = s->as_CFGLoop(); 2064 tty->print(" L%d(%6.3f)", lp->_id, lp->_freq); 2065 } 2066 } 2067 tty->print("\n"); 2068 for (int i = 0; i < _depth; i++) tty->print(" "); 2069 tty->print(" exits: "); 2070 k = 0; 2071 for (int i = 0; i < _exits.length(); i++) { 2072 if (k++ >= 7) { 2073 tty->print("\n "); 2074 for (int j = 0; j < _depth+1; j++) tty->print(" "); 2075 k = 0; 2076 } 2077 Block *blk = _exits.at(i).get_target(); 2078 float prob = _exits.at(i).get_prob(); 2079 tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100)); 2080 } 2081 tty->print("\n"); 2082 } 2083 #endif