1 /*
   2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "libadt/vectset.hpp"
  27 #include "memory/allocation.inline.hpp"
  28 #include "opto/block.hpp"
  29 #include "opto/c2compiler.hpp"
  30 #include "opto/callnode.hpp"
  31 #include "opto/cfgnode.hpp"
  32 #include "opto/machnode.hpp"
  33 #include "opto/opcodes.hpp"
  34 #include "opto/phaseX.hpp"
  35 #include "opto/rootnode.hpp"
  36 #include "opto/runtime.hpp"
  37 #include "runtime/deoptimization.hpp"
  38 #if defined AD_MD_HPP
  39 # include AD_MD_HPP
  40 #elif defined TARGET_ARCH_MODEL_x86_32
  41 # include "adfiles/ad_x86_32.hpp"
  42 #elif defined TARGET_ARCH_MODEL_x86_64
  43 # include "adfiles/ad_x86_64.hpp"
  44 #elif defined TARGET_ARCH_MODEL_sparc
  45 # include "adfiles/ad_sparc.hpp"
  46 #elif defined TARGET_ARCH_MODEL_zero
  47 # include "adfiles/ad_zero.hpp"
  48 #elif defined TARGET_ARCH_MODEL_ppc_64
  49 # include "adfiles/ad_ppc_64.hpp"
  50 #endif
  51 
  52 
  53 // Portions of code courtesy of Clifford Click
  54 
  55 // Optimization - Graph Style
  56 
  57 // To avoid float value underflow
  58 #define MIN_BLOCK_FREQUENCY 1.e-35f
  59 
  60 //----------------------------schedule_node_into_block-------------------------
  61 // Insert node n into block b. Look for projections of n and make sure they
  62 // are in b also.
  63 void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
  64   // Set basic block of n, Add n to b,
  65   map_node_to_block(n, b);
  66   b->add_inst(n);
  67 
  68   // After Matching, nearly any old Node may have projections trailing it.
  69   // These are usually machine-dependent flags.  In any case, they might
  70   // float to another block below this one.  Move them up.
  71   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  72     Node*  use  = n->fast_out(i);
  73     if (use->is_Proj()) {
  74       Block* buse = get_block_for_node(use);
  75       if (buse != b) {              // In wrong block?
  76         if (buse != NULL) {
  77           buse->find_remove(use);   // Remove from wrong block
  78         }
  79         map_node_to_block(use, b);
  80         b->add_inst(use);
  81       }
  82     }
  83   }
  84 }
  85 
  86 //----------------------------replace_block_proj_ctrl-------------------------
  87 // Nodes that have is_block_proj() nodes as their control need to use
  88 // the appropriate Region for their actual block as their control since
  89 // the projection will be in a predecessor block.
  90 void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
  91   const Node *in0 = n->in(0);
  92   assert(in0 != NULL, "Only control-dependent");
  93   const Node *p = in0->is_block_proj();
  94   if (p != NULL && p != n) {    // Control from a block projection?
  95     assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
  96     // Find trailing Region
  97     Block *pb = get_block_for_node(in0); // Block-projection already has basic block
  98     uint j = 0;
  99     if (pb->_num_succs != 1) {  // More than 1 successor?
 100       // Search for successor
 101       uint max = pb->number_of_nodes();
 102       assert( max > 1, "" );
 103       uint start = max - pb->_num_succs;
 104       // Find which output path belongs to projection
 105       for (j = start; j < max; j++) {
 106         if( pb->get_node(j) == in0 )
 107           break;
 108       }
 109       assert( j < max, "must find" );
 110       // Change control to match head of successor basic block
 111       j -= start;
 112     }
 113     n->set_req(0, pb->_succs[j]->head());
 114   }
 115 }
 116 
 117 
 118 //------------------------------schedule_pinned_nodes--------------------------
 119 // Set the basic block for Nodes pinned into blocks
 120 void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
 121   // Allocate node stack of size C->live_nodes()+8 to avoid frequent realloc
 122   GrowableArray <Node *> spstack(C->live_nodes() + 8);
 123   spstack.push(_root);
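       // Iterative walk over the graph from the root, following input edges;
       // the 'visited' set guarantees each node is examined only once.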
 124   while (spstack.is_nonempty()) {
 125     Node* node = spstack.pop();
 126     if (!visited.test_set(node->_idx)) { // Test node and flag it as visited
 127       if (node->pinned() && !has_block(node)) {  // Pinned?  Nail it down!
 128         assert(node->in(0), "pinned Node must have Control");
 129         // Before setting block replace block_proj control edge
 130         replace_block_proj_ctrl(node);
 131         Node* input = node->in(0);
 132         while (!input->is_block_start()) {
 133           input = input->in(0);
 134         }
 135         Block* block = get_block_for_node(input); // Basic block of controlling input
 136         schedule_node_into_block(node, block);
 137       }
 138 
 139       // process all inputs that are non-NULL
 140       for (int i = node->req() - 1; i >= 0; --i) {
 141         if (node->in(i) != NULL) {
 142           spstack.push(node->in(i));
 143         }
 144       }
 145     }
 146   }
 147 }
 148 
 149 #ifdef ASSERT
 150 // Assert that new input b2 is dominated by all previous inputs.
 151 // Check this by seeing that it is dominated by b1, the deepest
 152 // input observed until b2.
 153 static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
 154   if (b1 == NULL)  return;
 155   assert(b1->_dom_depth < b2->_dom_depth, "sanity");
 156   Block* tmp = b2;
 157   while (tmp != b1 && tmp != NULL) {
 158     tmp = tmp->_idom;
 159   }
 160   if (tmp != b1) {
 161     // Detected an unschedulable graph.  Print some nice stuff and die.
 162     tty->print_cr("!!! Unschedulable graph !!!");
 163     for (uint j=0; j<n->len(); j++) { // For all inputs
 164       Node* inn = n->in(j); // Get input
 165       if (inn == NULL)  continue;  // Ignore NULL, missing inputs
 166       Block* inb = cfg->get_block_for_node(inn);
 167       tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
 168                  inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
 169       inn->dump();
 170     }
 171     tty->print("Failing node: ");
 172     n->dump();
 173     assert(false, "unschedulable graph");
 174   }
 175 }
 176 #endif
 177 
 178 static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
 179   // Find the last input dominated by all other inputs.
 180   Block* deepb           = NULL;        // Deepest block so far
 181   int    deepb_dom_depth = 0;
 182   for (uint k = 0; k < n->len(); k++) { // For all inputs
 183     Node* inn = n->in(k);               // Get input
 184     if (inn == NULL)  continue;         // Ignore NULL, missing inputs
 185     Block* inb = cfg->get_block_for_node(inn);
 186     assert(inb != NULL, "must already have scheduled this input");
 187     if (deepb_dom_depth < (int) inb->_dom_depth) {
 188       // The new inb must be dominated by the previous deepb.
 189       // The various inputs must be linearly ordered in the dom
 190       // tree, or else there will not be a unique deepest block.
 191       DEBUG_ONLY(assert_dom(deepb, inb, n, cfg));
 192       deepb = inb;                      // Save deepest block
 193       deepb_dom_depth = deepb->_dom_depth;
 194     }
 195   }
 196   assert(deepb != NULL, "must be at least one input to n");
 197   return deepb;
 198 }
 199 
 200 
 201 //------------------------------schedule_early---------------------------------
 202 // Find the earliest Block any instruction can be placed in.  Some instructions
 203 // are pinned into Blocks.  Unpinned instructions can appear in the last block in
 204 // which all their inputs occur.
 205 bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) {
 206   // Allocate stack with enough space to avoid frequent realloc
 207   Node_Stack nstack(roots.Size() + 8);
 208   // _root will be processed among C->top() inputs
 209   roots.push(C->top());
 210   visited.set(C->top()->_idx);
 211 
 212   while (roots.size() != 0) {
 213     // Use local variables parent_node & input_index to cache values
 214     // at the top of the node stack.
 215     Node* parent_node = roots.pop();
 216     uint  input_index = 0;
 217 
 218     while (true) {
 219       if (input_index == 0) {
 220         // Fixup some control.  Constants without control get attached
 221         // to root and nodes that use is_block_proj() nodes should be attached
 222         // to the region that starts their block.
 223         const Node* control_input = parent_node->in(0);
 224         if (control_input != NULL) {
 225           replace_block_proj_ctrl(parent_node);
 226         } else {
 227           // Is a constant with NO inputs?
 228           if (parent_node->req() == 1) {
 229             parent_node->set_req(0, _root);
 230           }
 231         }
 232       }
 233 
 234       // First, visit all inputs and force them to get a block.  If an
 235       // input is already in a block we quit following inputs (to avoid
 236       // cycles). Instead we put that Node on a worklist to be handled
 237       // later (since its inputs may not have a block yet).
 238 
 239       // Assume all n's inputs will be processed
 240       bool done = true;
 241 
 242       while (input_index < parent_node->len()) {
 243         Node* in = parent_node->in(input_index++);
 244         if (in == NULL) {
 245           continue;
 246         }
 247 
 248         int is_visited = visited.test_set(in->_idx);
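             // test_set() returns the previous value of the visited bit and sets it;
             // an already-visited input that still has no block indicates a cycle.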
 249         if (!has_block(in)) {
 250           if (is_visited) {
 251             assert(false, "graph should be schedulable");
 252             return false;
 253           }
 254           // Save parent node and next input's index.
 255           nstack.push(parent_node, input_index);
 256           // Process current input now.
 257           parent_node = in;
 258           input_index = 0;
 259           // Not all n's inputs processed.
 260           done = false;
 261           break;
 262         } else if (!is_visited) {
 263           // Visit this guy later, using worklist
 264           roots.push(in);
 265         }
 266       }
 267 
 268       if (done) {
 269         // All of n's inputs have been processed, complete post-processing.
 270 
 271         // Some instructions are pinned into a block.  These include Region,
 272         // Phi, Start, Return, and other control-dependent instructions and
 273         // any projections which depend on them.
 274         if (!parent_node->pinned()) {
 275           // Set earliest legal block.
 276           Block* earliest_block = find_deepest_input(parent_node, this);
 277           map_node_to_block(parent_node, earliest_block);
 278         } else {
 279           assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge");
 280         }
 281 
 282         if (nstack.is_empty()) {
 283           // Finished all nodes on stack.
 284           // Process next node on the worklist 'roots'.
 285           break;
 286         }
 287         // Get saved parent node and next input's index.
 288         parent_node = nstack.node();
 289         input_index = nstack.index();
 290         nstack.pop();
 291       }
 292     }
 293   }
 294   return true;
 295 }
 296 
 297 //------------------------------dom_lca----------------------------------------
 298 // Find least common ancestor in dominator tree
 299 // LCA is a current notion of LCA, to be raised above 'this'.
 300 // As a convenient boundary condition, return 'this' if LCA is NULL.
 301 // Find the LCA of those two nodes.
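     // For example, if B1 immediately dominates B2 and B5, and B2 immediately
     // dominates B3 and B4, then B3->dom_lca(B4) steps both blocks up once to
     // their common idom B2, while B3->dom_lca(B5) first raises B3 to the depth
     // of B5 (reaching B2) and then walks both up to B1.  (Block names here are
     // purely illustrative.)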
 302 Block* Block::dom_lca(Block* LCA) {
 303   if (LCA == NULL || LCA == this)  return this;
 304 
 305   Block* anc = this;
 306   while (anc->_dom_depth > LCA->_dom_depth)
 307     anc = anc->_idom;           // Walk up till anc is as high as LCA
 308 
 309   while (LCA->_dom_depth > anc->_dom_depth)
 310     LCA = LCA->_idom;           // Walk up till LCA is as high as anc
 311 
 312   while (LCA != anc) {          // Walk both up till they are the same
 313     LCA = LCA->_idom;
 314     anc = anc->_idom;
 315   }
 316 
 317   return LCA;
 318 }
 319 
 320 //--------------------------raise_LCA_above_use--------------------------------
 321 // We are placing a definition, and have been given a def->use edge.
 322 // The definition must dominate the use, so move the LCA upward in the
 323 // dominator tree to dominate the use.  If the use is a phi, adjust
 324 // the LCA only with the phi input paths which actually use this def.
 325 static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
 326   Block* buse = cfg->get_block_for_node(use);
 327   if (buse == NULL)    return LCA;   // Unused killing Projs have no use block
 328   if (!use->is_Phi())  return buse->dom_lca(LCA);
 329   uint pmax = use->req();       // Number of Phi inputs
 330   // Why does not this loop just break after finding the matching input to
 331   // the Phi?  Well...it's like this.  I do not have true def-use/use-def
 332   // chains.  Means I cannot distinguish, from the def-use direction, which
 333   // of many use-defs lead from the same use to the same def.  That is, this
 334   // Phi might have several uses of the same def.  Each use appears in a
 335   // different predecessor block.  But when I enter here, I cannot distinguish
 336   // which use-def edge I should find the predecessor block for.  So I find
 337   // them all.  Means I do a little extra work if a Phi uses the same value
 338   // more than once.
 339   for (uint j=1; j<pmax; j++) { // For all inputs
 340     if (use->in(j) == def) {    // Found matching input?
 341       Block* pred = cfg->get_block_for_node(buse->pred(j));
 342       LCA = pred->dom_lca(LCA);
 343     }
 344   }
 345   return LCA;
 346 }
 347 
 348 //----------------------------raise_LCA_above_marks----------------------------
 349 // Return a new LCA that dominates LCA and any of its marked predecessors.
 350 // Search all my parents up to 'early' (exclusive), looking for predecessors
 351 // which are marked with the given index.  Return the LCA (in the dom tree)
 352 // of all marked blocks.  If there are none marked, return the original
 353 // LCA.
 354 static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
 355   Block_List worklist;
 356   worklist.push(LCA);
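       // Walk backward over predecessor blocks, starting from the current LCA and
       // bounded above by 'early'; raise_LCA_visited(mark) keeps a block from
       // being examined more than once.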
 357   while (worklist.size() > 0) {
 358     Block* mid = worklist.pop();
 359     if (mid == early)  continue;  // stop searching here
 360 
 361     // Test and set the visited bit.
 362     if (mid->raise_LCA_visited() == mark)  continue;  // already visited
 363 
 364     // Don't process the current LCA, otherwise the search may terminate early
 365     if (mid != LCA && mid->raise_LCA_mark() == mark) {
 366       // Raise the LCA.
 367       LCA = mid->dom_lca(LCA);
 368       if (LCA == early)  break;   // stop searching everywhere
 369       assert(early->dominates(LCA), "early is high enough");
 370       // Resume searching at that point, skipping intermediate levels.
 371       worklist.push(LCA);
 372       if (LCA == mid)
 373         continue; // Don't mark as visited to avoid early termination.
 374     } else {
 375       // Keep searching through this block's predecessors.
 376       for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
 377         Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
 378         worklist.push(mid_parent);
 379       }
 380     }
 381     mid->set_raise_LCA_visited(mark);
 382   }
 383   return LCA;
 384 }
 385 
 386 //--------------------------memory_early_block--------------------------------
 387 // This is a variation of find_deepest_input, the heart of schedule_early.
 388 // Find the "early" block for a load, if we considered only memory and
 389 // address inputs, that is, if other data inputs were ignored.
 390 //
 391 // Because a subset of edges are considered, the resulting block will
 392 // be earlier (at a shallower dom_depth) than the true schedule_early
 393 // point of the node. We compute this earlier block as a more permissive
 394 // site for anti-dependency insertion, but only if subsume_loads is enabled.
 395 static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
 396   Node* base;
 397   Node* index;
 398   Node* store = load->in(MemNode::Memory);
 399   load->as_Mach()->memory_inputs(base, index);
 400 
 401   assert(base != NodeSentinel && index != NodeSentinel,
 402          "unexpected base/index inputs");
 403 
 404   Node* mem_inputs[4];
 405   int mem_inputs_length = 0;
 406   if (base != NULL)  mem_inputs[mem_inputs_length++] = base;
 407   if (index != NULL) mem_inputs[mem_inputs_length++] = index;
 408   if (store != NULL) mem_inputs[mem_inputs_length++] = store;
 409 
 410   // In the comparison below, add one to account for the control input,
 411   // which may be null, but always takes up a spot in the in array.
 412   if (mem_inputs_length + 1 < (int) load->req()) {
 413     // This "load" has more inputs than just the memory, base and index inputs.
 414     // For purposes of checking anti-dependences, we need to start
 415     // from the early block of only the address portion of the instruction,
 416     // and ignore other blocks that may have factored into the wider
 417     // schedule_early calculation.
 418     if (load->in(0) != NULL) mem_inputs[mem_inputs_length++] = load->in(0);
 419 
 420     Block* deepb           = NULL;        // Deepest block so far
 421     int    deepb_dom_depth = 0;
 422     for (int i = 0; i < mem_inputs_length; i++) {
 423       Block* inb = cfg->get_block_for_node(mem_inputs[i]);
 424       if (deepb_dom_depth < (int) inb->_dom_depth) {
 425         // The new inb must be dominated by the previous deepb.
 426         // The various inputs must be linearly ordered in the dom
 427         // tree, or else there will not be a unique deepest block.
 428         DEBUG_ONLY(assert_dom(deepb, inb, load, cfg));
 429         deepb = inb;                      // Save deepest block
 430         deepb_dom_depth = deepb->_dom_depth;
 431       }
 432     }
 433     early = deepb;
 434   }
 435 
 436   return early;
 437 }
 438 
 439 //--------------------------insert_anti_dependences---------------------------
 440 // A load may need to witness memory that nearby stores can overwrite.
 441 // For each nearby store, either insert an "anti-dependence" edge
 442 // from the load to the store, or else move LCA upward to force the
 443 // load to (eventually) be scheduled in a block above the store.
 444 //
 445 // Do not add edges to stores on distinct control-flow paths;
 446 // only add edges to stores which might interfere.
 447 //
 448 // Return the (updated) LCA.  There will not be any possibly interfering
 449 // store between the load's "early block" and the updated LCA.
 450 // Any stores in the updated LCA will have new precedence edges
 451 // back to the load.  The caller is expected to schedule the load
 452 // in the LCA, in which case the precedence edges will make LCM
 453 // preserve anti-dependences.  The caller may also hoist the load
 454 // above the LCA, if it is not the early block.
 455 Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
 456   assert(load->needs_anti_dependence_check(), "must be a load of some sort");
 457   assert(LCA != NULL, "");
 458   DEBUG_ONLY(Block* LCA_orig = LCA);
 459 
 460   // Compute the alias index.  Loads and stores with different alias indices
 461   // do not need anti-dependence edges.
 462   uint load_alias_idx = C->get_alias_index(load->adr_type());
 463 #ifdef ASSERT
 464   if (load_alias_idx == Compile::AliasIdxBot && C->AliasLevel() > 0 &&
 465       (PrintOpto || VerifyAliases ||
 466        PrintMiscellaneous && (WizardMode || Verbose))) {
 467     // Load nodes should not consume all of memory.
 468     // Reporting a bottom type indicates a bug in adlc.
 469     // If some particular type of node validly consumes all of memory,
 470     // sharpen the preceding "if" to exclude it, so we can catch bugs here.
 471     tty->print_cr("*** Possible Anti-Dependence Bug:  Load consumes all of memory.");
 472     load->dump(2);
 473     if (VerifyAliases)  assert(load_alias_idx != Compile::AliasIdxBot, "");
 474   }
 475 #endif
 476   assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrComp),
 477          "String compare is the only known 'load' that does not conflict with any stores");
 478   assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrEquals),
 479          "String equals is a 'load' that does not conflict with any stores");
 480   assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrIndexOf),
 481          "String indexOf is a 'load' that does not conflict with any stores");
 482   assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_AryEq),
 483          "Arrays equals is a 'load' that does not conflict with any stores");
 484 
 485   if (!C->alias_type(load_alias_idx)->is_rewritable()) {
 486     // It is impossible to spoil this load by putting stores before it,
 487     // because we know that the stores will never update the value
 488     // which 'load' must witness.
 489     return LCA;
 490   }
 491 
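       // The load's node index doubles as the mark value passed to
       // set_raise_LCA_mark(), letting raise_LCA_above_marks() recognize the
       // blocks that were tagged on behalf of this particular load.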
 492   node_idx_t load_index = load->_idx;
 493 
 494   // Note the earliest legal placement of 'load', as determined by
 495   // the unique point in the dom tree where all memory effects
 496   // and other inputs are first available.  (Computed by schedule_early.)
 497   // For normal loads, 'early' is the shallowest place (dom graph wise)
 498   // to look for anti-deps between this load and any store.
 499   Block* early = get_block_for_node(load);
 500 
 501   // If we are subsuming loads, compute an "early" block that only considers
 502   // memory or address inputs. This block may be different than the
 503   // schedule_early block in that it could be at an even shallower depth in the
 504   // dominator tree, and allow for a broader discovery of anti-dependences.
 505   if (C->subsume_loads()) {
 506     early = memory_early_block(load, early, this);
 507   }
 508 
 509   ResourceArea *area = Thread::current()->resource_area();
 510   Node_List worklist_mem(area);     // prior memory state to store
 511   Node_List worklist_store(area);   // possible-def to explore
 512   Node_List worklist_visited(area); // visited mergemem nodes
 513   Node_List non_early_stores(area); // all relevant stores outside of early
 514   bool must_raise_LCA = false;
 515 
 516 #ifdef TRACK_PHI_INPUTS
 517   // %%% This extra checking fails because MergeMem nodes are not GVNed.
 518   // Provide "phi_inputs" to check if every input to a PhiNode is from the
 519   // original memory state.  This indicates a PhiNode which should not
 520   // prevent the load from sinking.  For such a block, set_raise_LCA_mark
 521   // may be overly conservative.
 522   // Mechanism: count inputs seen for each Phi encountered in worklist_store.
 523   DEBUG_ONLY(GrowableArray<uint> phi_inputs(area, C->unique(),0,0));
 524 #endif
 525 
 526   // 'load' uses some memory state; look for users of the same state.
 527   // Recurse through MergeMem nodes to the stores that use them.
 528 
 529   // Each of these stores is a possible definition of memory
 530   // that 'load' needs to use.  We need to force 'load'
 531   // to occur before each such store.  When the store is in
 532   // the same block as 'load', we insert an anti-dependence
 533   // edge load->store.
 534 
 535   // The relevant stores "nearby" the load consist of a tree rooted
 536   // at initial_mem, with internal nodes of type MergeMem.
 537   // Therefore, the branches visited by the worklist are of this form:
 538   //    initial_mem -> (MergeMem ->)* store
 539   // The anti-dependence constraints apply only to the fringe of this tree.
 540 
 541   Node* initial_mem = load->in(MemNode::Memory);
 542   worklist_store.push(initial_mem);
 543   worklist_visited.push(initial_mem);
 544   worklist_mem.push(NULL);
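       // Seed the walk at the load's own memory input; the matching NULL pushed on
       // worklist_mem records that the root of the search has no prior state.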
 545   while (worklist_store.size() > 0) {
 546     // Examine a nearby store to see if it might interfere with our load.
 547     Node* mem   = worklist_mem.pop();
 548     Node* store = worklist_store.pop();
 549     uint op = store->Opcode();
 550 
 551     // MergeMems do not directly have anti-deps.
 552     // Treat them as internal nodes in a forward tree of memory states,
 553     // the leaves of which are each a 'possible-def'.
 554     if (store == initial_mem    // root (exclusive) of tree we are searching
 555         || op == Op_MergeMem    // internal node of tree we are searching
 556         ) {
 557       mem = store;   // It's not a possibly interfering store.
 558       if (store == initial_mem)
 559         initial_mem = NULL;  // only process initial memory once
 560 
 561       for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
 562         store = mem->fast_out(i);
 563         if (store->is_MergeMem()) {
 564           // Be sure we don't get into combinatorial problems.
 565           // (Allow phis to be repeated; they can merge two relevant states.)
 566           uint j = worklist_visited.size();
 567           for (; j > 0; j--) {
 568             if (worklist_visited.at(j-1) == store)  break;
 569           }
 570           if (j > 0)  continue; // already on work list; do not repeat
 571           worklist_visited.push(store);
 572         }
 573         worklist_mem.push(mem);
 574         worklist_store.push(store);
 575       }
 576       continue;
 577     }
 578 
 579     if (op == Op_MachProj || op == Op_Catch)   continue;
 580     if (store->needs_anti_dependence_check())  continue;  // not really a store
 581 
 582     // Compute the alias index.  Loads and stores with different alias
 583     // indices do not need anti-dependence edges.  Wide MemBar's are
 584     // anti-dependent on everything (except immutable memories).
 585     const TypePtr* adr_type = store->adr_type();
 586     if (!C->can_alias(adr_type, load_alias_idx))  continue;
 587 
 588     // Most slow-path runtime calls do NOT modify Java memory, but
 589     // they can block and so write Raw memory.
 590     if (store->is_Mach()) {
 591       MachNode* mstore = store->as_Mach();
 592       if (load_alias_idx != Compile::AliasIdxRaw) {
 593         // Check for call into the runtime using the Java calling
 594         // convention (and from there into a wrapper); it has no
 595         // _method.  Can't do this optimization for Native calls because
 596         // they CAN write to Java memory.
 597         if (mstore->ideal_Opcode() == Op_CallStaticJava) {
 598           assert(mstore->is_MachSafePoint(), "");
 599           MachSafePointNode* ms = (MachSafePointNode*) mstore;
 600           assert(ms->is_MachCallJava(), "");
 601           MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
 602           if (mcj->_method == NULL) {
 603             // These runtime calls do not write to Java visible memory
 604             // (other than Raw) and so do not require anti-dependence edges.
 605             continue;
 606           }
 607         }
 608         // Same for SafePoints: they read/write Raw but only read otherwise.
 609         // This is basically a workaround for SafePoints only defining control
 610         // instead of control + memory.
 611         if (mstore->ideal_Opcode() == Op_SafePoint)
 612           continue;
 613       } else {
 614         // Some raw memory, such as the load of "top" at an allocation,
 615         // can be control dependent on the previous safepoint. See
 616         // comments in GraphKit::allocate_heap() about control input.
 617         // Inserting an anti-dep between such a safepoint and a use
 618         // creates a cycle, and will cause a subsequent failure in
 619         // local scheduling.  (BugId 4919904)
 620         // (%%% How can a control input be a safepoint and not a projection??)
 621         if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore)
 622           continue;
 623       }
 624     }
 625 
 626     // Identify a block that the current load must be above,
 627     // or else observe that 'store' is all the way up in the
 628     // earliest legal block for 'load'.  In the latter case,
 629     // immediately insert an anti-dependence edge.
 630     Block* store_block = get_block_for_node(store);
 631     assert(store_block != NULL, "unused killing projections skipped above");
 632 
 633     if (store->is_Phi()) {
 634       // 'load' uses memory which is one (or more) of the Phi's inputs.
 635       // It must be scheduled not before the Phi, but rather before
 636       // each of the relevant Phi inputs.
 637       //
 638       // Instead of finding the LCA of all inputs to a Phi that match 'mem',
 639       // we mark each corresponding predecessor block and do a combined
 640       // hoisting operation later (raise_LCA_above_marks).
 641       //
 642       // Do not assert(store_block != early, "Phi merging memory after access")
 643       // PhiNode may be at start of block 'early' with backedge to 'early'
 644       DEBUG_ONLY(bool found_match = false);
 645       for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
 646         if (store->in(j) == mem) {   // Found matching input?
 647           DEBUG_ONLY(found_match = true);
 648           Block* pred_block = get_block_for_node(store_block->pred(j));
 649           if (pred_block != early) {
 650             // If any predecessor of the Phi matches the load's "early block",
 651             // we do not need a precedence edge between the Phi and 'load'
 652             // since the load will be forced into a block preceding the Phi.
 653             pred_block->set_raise_LCA_mark(load_index);
 654             assert(!LCA_orig->dominates(pred_block) ||
 655                    early->dominates(pred_block), "early is high enough");
 656             must_raise_LCA = true;
 657           } else {
 658             // anti-dependent upon PHI pinned below 'early', no edge needed
 659             LCA = early;             // but can not schedule below 'early'
 660           }
 661         }
 662       }
 663       assert(found_match, "no worklist bug");
 664 #ifdef TRACK_PHI_INPUTS
 665 #ifdef ASSERT
 666       // This assert asks about correct handling of PhiNodes, which may not
 667       // have all input edges directly from 'mem'. See BugId 4621264
 668       int num_mem_inputs = phi_inputs.at_grow(store->_idx,0) + 1;
 669       // Increment by exactly one even if there are multiple copies of 'mem'
 670       // coming into the phi, because we will run this block several times
 671       // if there are several copies of 'mem'.  (That's how DU iterators work.)
 672       phi_inputs.at_put(store->_idx, num_mem_inputs);
 673       assert(PhiNode::Input + num_mem_inputs < store->req(),
 674              "Expect at least one phi input will not be from original memory state");
 675 #endif //ASSERT
 676 #endif //TRACK_PHI_INPUTS
 677     } else if (store_block != early) {
 678       // 'store' is between the current LCA and earliest possible block.
 679       // Label its block, and decide later on how to raise the LCA
 680       // to include the effect on LCA of this store.
 681       // If this store's block gets chosen as the raised LCA, we
 682       // will find him on the non_early_stores list and stick him
 683       // with a precedence edge.
 684       // (But, don't bother if LCA is already raised all the way.)
 685       if (LCA != early) {
 686         store_block->set_raise_LCA_mark(load_index);
 687         must_raise_LCA = true;
 688         non_early_stores.push(store);
 689       }
 690     } else {
 691       // Found a possibly-interfering store in the load's 'early' block.
 692       // This means 'load' cannot sink at all in the dominator tree.
 693       // Add an anti-dep edge, and squeeze 'load' into the highest block.
 694       assert(store != load->in(0), "dependence cycle found");
 695       if (verify) {
 696         assert(store->find_edge(load) != -1, "missing precedence edge");
 697       } else {
 698         store->add_prec(load);
 699       }
 700       LCA = early;
 701       // This turns off the process of gathering non_early_stores.
 702     }
 703   }
 704   // (Worklist is now empty; all nearby stores have been visited.)
 705 
 706   // Finished if 'load' must be scheduled in its 'early' block.
 707   // If we found any stores there, they have already been given
 708   // precedence edges.
 709   if (LCA == early)  return LCA;
 710 
 711   // We get here only if there are no possibly-interfering stores
 712   // in the load's 'early' block.  Move LCA up above all predecessors
 713   // which contain stores we have noted.
 714   //
 715   // The raised LCA block can be a home to such interfering stores,
 716   // but its predecessors must not contain any such stores.
 717   //
 718   // The raised LCA will be a lower bound for placing the load,
 719   // preventing the load from sinking past any block containing
 720   // a store that may invalidate the memory state required by 'load'.
 721   if (must_raise_LCA)
 722     LCA = raise_LCA_above_marks(LCA, load->_idx, early, this);
 723   if (LCA == early)  return LCA;
 724 
 725   // Insert anti-dependence edges from 'load' to each store
 726   // in the non-early LCA block.
 727   // Mine the non_early_stores list for such stores.
 728   if (LCA->raise_LCA_mark() == load_index) {
 729     while (non_early_stores.size() > 0) {
 730       Node* store = non_early_stores.pop();
 731       Block* store_block = get_block_for_node(store);
 732       if (store_block == LCA) {
 733         // add anti_dependence from store to load in its own block
 734         assert(store != load->in(0), "dependence cycle found");
 735         if (verify) {
 736           assert(store->find_edge(load) != -1, "missing precedence edge");
 737         } else {
 738           store->add_prec(load);
 739         }
 740       } else {
 741         assert(store_block->raise_LCA_mark() == load_index, "block was marked");
 742         // Any other stores we found must be either inside the new LCA
 743         // or else outside the original LCA.  In the latter case, they
 744         // did not interfere with any use of 'load'.
 745         assert(LCA->dominates(store_block)
 746                || !LCA_orig->dominates(store_block), "no stray stores");
 747       }
 748     }
 749   }
 750 
 751   // Return the highest block containing stores; any stores
 752   // within that block have been given anti-dependence edges.
 753   return LCA;
 754 }
 755 
 756 // This class is used to iterate backwards over the nodes in the graph.
 757 
 758 class Node_Backward_Iterator {
 759 
 760 private:
 761   Node_Backward_Iterator();
 762 
 763 public:
 764   // Constructor for the iterator
 765   Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg);
 766 
 767   // Return the next node in the backward traversal, or NULL when done
 768   Node *next();
 769 
 770 private:
 771   VectorSet   &_visited;
 772   Node_List   &_stack;
 773   PhaseCFG &_cfg;
 774 };
 775 
 776 // Constructor for the Node_Backward_Iterator
 777 Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg)
 778   : _visited(visited), _stack(stack), _cfg(cfg) {
 779   // The stack should contain exactly the root
 780   stack.clear();
 781   stack.push(root);
 782 
 783   // Clear the visited bits
 784   visited.Clear();
 785 }
 786 
 787 // Return nodes in post-order: a node is returned only after its uses have been visited
 788 Node *Node_Backward_Iterator::next() {
 789 
 790   // If the _stack is empty, then just return NULL: finished.
 791   if ( !_stack.size() )
 792     return NULL;
 793 
 794   // '_stack' is emulating a real _stack.  The 'visit-all-users' loop has been
 795   // made stateless, so I do not need to record the index 'i' on my _stack.
 796   // Instead I visit all users each time, scanning for unvisited users.
 797   // I visit unvisited not-anti-dependence users first, then anti-dependent
 798   // children next.
 799   Node *self = _stack.pop();
 800 
 801   // I cycle here when I am entering a deeper level of recursion.
 802   // The key variable 'self' was set prior to jumping here.
 803   while( 1 ) {
 804 
 805     _visited.set(self->_idx);
 806 
 807     // Now schedule all uses as late as possible.
 808     const Node* src = self->is_Proj() ? self->in(0) : self;
 809     uint src_rpo = _cfg.get_block_for_node(src)->_rpo;
 810 
 811     // Schedule all nodes in a post-order visit
 812     Node *unvisited = NULL;  // Unvisited anti-dependent Node, if any
 813 
 814     // Scan for unvisited nodes
 815     for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
 816       // For all uses, schedule late
 817       Node* n = self->fast_out(i); // Use
 818 
 819       // Skip already visited children
 820       if ( _visited.test(n->_idx) )
 821         continue;
 822 
 823       // do not traverse backward control edges
 824       Node *use = n->is_Proj() ? n->in(0) : n;
 825       uint use_rpo = _cfg.get_block_for_node(use)->_rpo;
 826 
 827       if ( use_rpo < src_rpo )
 828         continue;
 829 
 830       // Phi nodes always precede uses in a basic block
 831       if ( use_rpo == src_rpo && use->is_Phi() )
 832         continue;
 833 
 834       unvisited = n;      // Found unvisited
 835 
 836       // Check for possible-anti-dependent
 837       if( !n->needs_anti_dependence_check() )
 838         break;            // Not visited, not anti-dep; schedule it NOW
 839     }
 840 
 841     // Did I find an unvisited not-anti-dependent Node?
 842     if ( !unvisited )
 843       break;                  // All done with children; post-visit 'self'
 844 
 845     // Visit the unvisited Node.  Contains the obvious push to
 846     // indicate I'm entering a deeper level of recursion.  I push the
 847     // old state onto the _stack and set a new state and loop (recurse).
 848     _stack.push(self);
 849     self = unvisited;
 850   } // End recursion loop
 851 
 852   return self;
 853 }
 854 
 855 //------------------------------ComputeLatenciesBackwards----------------------
 856 // Compute the latency of all the instructions.
 857 void PhaseCFG::compute_latencies_backwards(VectorSet &visited, Node_List &stack) {
 858 #ifndef PRODUCT
 859   if (trace_opto_pipelining())
 860     tty->print("\n#---- ComputeLatenciesBackwards ----\n");
 861 #endif
 862 
 863   Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
 864   Node *n;
 865 
 866   // Walk over all the nodes from last to first
 867   while ((n = iter.next()) != NULL) {
 868     // Set the latency for the definitions of this instruction
 869     partial_latency_of_defs(n);
 870   }
 871 } // end ComputeLatenciesBackwards
 872 
 873 //------------------------------partial_latency_of_defs------------------------
 874 // Compute the latency impact of this node on all defs.  This computes
 875 // a number that increases as we approach the beginning of the routine.
 876 void PhaseCFG::partial_latency_of_defs(Node *n) {
 877   // Set the latency for this instruction
 878 #ifndef PRODUCT
 879   if (trace_opto_pipelining()) {
 880     tty->print("# latency_to_inputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
 881     dump();
 882   }
 883 #endif
 884 
 885   if (n->is_Proj()) {
 886     n = n->in(0);
 887   }
 888 
 889   if (n->is_Root()) {
 890     return;
 891   }
 892 
 893   uint nlen = n->len();
 894   uint use_latency = get_latency_for_node(n);
 895   uint use_pre_order = get_block_for_node(n)->_pre_order;
 896 
 897   for (uint j = 0; j < nlen; j++) {
 898     Node *def = n->in(j);
 899 
 900     if (!def || def == n) {
 901       continue;
 902     }
 903 
 904     // Walk backwards thru projections
 905     if (def->is_Proj()) {
 906       def = def->in(0);
 907     }
 908 
 909 #ifndef PRODUCT
 910     if (trace_opto_pipelining()) {
 911       tty->print("#    in(%2d): ", j);
 912       def->dump();
 913     }
 914 #endif
 915 
 916     // If the defining block is not known, assume it is ok
 917     Block *def_block = get_block_for_node(def);
 918     uint def_pre_order = def_block ? def_block->_pre_order : 0;
 919 
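         // Ignore inputs whose defining block follows the use's block in pre-order,
         // and Phi inputs coming from the Phi's own block: these correspond to
         // backedges and do not contribute latency.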
 920     if ((use_pre_order <  def_pre_order) || (use_pre_order == def_pre_order && n->is_Phi())) {
 921       continue;
 922     }
 923 
 924     uint delta_latency = n->latency(j);
 925     uint current_latency = delta_latency + use_latency;
 926 
 927     if (get_latency_for_node(def) < current_latency) {
 928       set_latency_for_node(def, current_latency);
 929     }
 930 
 931 #ifndef PRODUCT
 932     if (trace_opto_pipelining()) {
 933       tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", use_latency, j, delta_latency, current_latency, def->_idx, get_latency_for_node(def));
 934     }
 935 #endif
 936   }
 937 }
 938 
 939 //------------------------------latency_from_use-------------------------------
 940 // Compute the latency of a specific use
 941 int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
 942   // If self-reference, return no latency
 943   if (use == n || use->is_Root()) {
 944     return 0;
 945   }
 946 
 947   uint def_pre_order = get_block_for_node(def)->_pre_order;
 948   uint latency = 0;
 949 
 950   // If the use is not a projection, then it is simple...
 951   if (!use->is_Proj()) {
 952 #ifndef PRODUCT
 953     if (trace_opto_pipelining()) {
 954       tty->print("#    out(): ");
 955       use->dump();
 956     }
 957 #endif
 958 
 959     uint use_pre_order = get_block_for_node(use)->_pre_order;
 960 
 961     if (use_pre_order < def_pre_order)
 962       return 0;
 963 
 964     if (use_pre_order == def_pre_order && use->is_Phi())
 965       return 0;
 966 
 967     uint nlen = use->len();
 968     uint nl = get_latency_for_node(use);
 969 
 970     for ( uint j=0; j<nlen; j++ ) {
 971       if (use->in(j) == n) {
 972         // Change this if we want local latencies
 973         uint ul = use->latency(j);
 974         uint  l = ul + nl;
 975         if (latency < l) latency = l;
 976 #ifndef PRODUCT
 977         if (trace_opto_pipelining()) {
 978           tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, latency = %d",
 979                         nl, j, ul, l, latency);
 980         }
 981 #endif
 982       }
 983     }
 984   } else {
 985     // This is a projection, just grab the latency of the use(s)
 986     for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
 987       uint l = latency_from_use(use, def, use->fast_out(j));
 988       if (latency < l) latency = l;
 989     }
 990   }
 991 
 992   return latency;
 993 }
 994 
 995 //------------------------------latency_from_uses------------------------------
 996 // Compute the latency of this instruction relative to all of its uses.
 997 // This computes a number that increases as we approach the beginning of the
 998 // routine.
 999 void PhaseCFG::latency_from_uses(Node *n) {
1000   // Set the latency for this instruction
1001 #ifndef PRODUCT
1002   if (trace_opto_pipelining()) {
1003     tty->print("# latency_from_outputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
1004     dump();
1005   }
1006 #endif
1007   uint latency=0;
1008   const Node *def = n->is_Proj() ? n->in(0): n;
1009 
1010   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1011     uint l = latency_from_use(n, def, n->fast_out(i));
1012 
1013     if (latency < l) latency = l;
1014   }
1015 
1016   set_latency_for_node(n, latency);
1017 }
1018 
1019 //------------------------------hoist_to_cheaper_block-------------------------
1020 // Pick a block for node self, between early and LCA, that is a cheaper
1021 // alternative to LCA.
1022 Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
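       // 'delta' is a small tolerance: a block accepted on latency grounds may be
       // up to this factor more frequent than the current best and still win.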
1023   const double delta = 1+PROB_UNLIKELY_MAG(4);
1024   Block* least       = LCA;
1025   double least_freq  = least->_freq;
1026   uint target        = get_latency_for_node(self);
1027   uint start_latency = get_latency_for_node(LCA->head());
1028   uint end_latency   = get_latency_for_node(LCA->get_node(LCA->end_idx()));
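       // 'in_latency' is true when the node's latency target is already met at the
       // start of the LCA block; in that case only a strictly lower execution
       // frequency (or stress mode) justifies hoisting any further.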
1029   bool in_latency    = (target <= start_latency);
1030   const Block* root_block = get_block_for_node(_root);
1031 
1032   // Turn off latency scheduling if scheduling is just plain off
1033   if (!C->do_scheduling())
1034     in_latency = true;
1035 
1036   // Do not hoist (to cover latency) instructions which target a
1037   // single register.  Hoisting stretches the live range of the
1038   // single register and may force spilling.
1039   MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
1040   if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty())
1041     in_latency = true;
1042 
1043 #ifndef PRODUCT
1044   if (trace_opto_pipelining()) {
1045     tty->print("# Find cheaper block for latency %d: ", get_latency_for_node(self));
1046     self->dump();
1047     tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
1048       LCA->_pre_order,
1049       LCA->head()->_idx,
1050       start_latency,
1051       LCA->get_node(LCA->end_idx())->_idx,
1052       end_latency,
1053       least_freq);
1054   }
1055 #endif
1056 
1057   int cand_cnt = 0;  // number of candidates tried
1058 
1059   // Walk up the dominator tree from LCA (Lowest common ancestor) to
1060   // the earliest legal location.  Capture the least execution frequency.
1061   while (LCA != early) {
1062     LCA = LCA->_idom;         // Follow up the dominator tree
1063 
1064     if (LCA == NULL) {
1065       // Bailout without retry
1066       assert(false, "graph should be schedulable");
1067       C->record_method_not_compilable("late schedule failed: LCA == NULL");
1068       return least;
1069     }
1070 
1071     // Don't hoist machine instructions to the root basic block
1072     if (mach && LCA == root_block)
1073       break;
1074 
1075     uint start_lat = get_latency_for_node(LCA->head());
1076     uint end_idx   = LCA->end_idx();
1077     uint end_lat   = get_latency_for_node(LCA->get_node(end_idx));
1078     double LCA_freq = LCA->_freq;
1079 #ifndef PRODUCT
1080     if (trace_opto_pipelining()) {
1081       tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
1082         LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq);
1083     }
1084 #endif
1085     cand_cnt++;
1086     if (LCA_freq < least_freq              || // Better Frequency
1087         (StressGCM && Compile::randomized_select(cand_cnt)) || // Should be randomly accepted in stress mode
1088          (!StressGCM                    &&    // Otherwise, choose with latency
1089           !in_latency                   &&    // No block containing latency
1090           LCA_freq < least_freq * delta &&    // No worse frequency
1091           target >= end_lat             &&    // within latency range
1092           !self->is_iteratively_computed() )  // But don't hoist IV increments
1093              // because they may end up above other uses of their phi forcing
1094              // their result register to be different from their input.
1095        ) {
1096       least = LCA;            // Found cheaper block
1097       least_freq = LCA_freq;
1098       start_latency = start_lat;
1099       end_latency = end_lat;
1100       if (target <= start_lat)
1101         in_latency = true;
1102     }
1103   }
1104 
1105 #ifndef PRODUCT
1106   if (trace_opto_pipelining()) {
1107     tty->print_cr("#  Choose block B%d with start latency=%d and freq=%g",
1108       least->_pre_order, start_latency, least_freq);
1109   }
1110 #endif
1111 
1112   // See if the latency needs to be updated
1113   if (target < end_latency) {
1114 #ifndef PRODUCT
1115     if (trace_opto_pipelining()) {
1116       tty->print_cr("#  Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
1117     }
1118 #endif
1119     set_latency_for_node(self, end_latency);
1120     partial_latency_of_defs(self);
1121   }
1122 
1123   return least;
1124 }
1125 
1126 
1127 //------------------------------schedule_late-----------------------------------
1128 // Now schedule all codes as LATE as possible.  This is the LCA in the
1129 // dominator tree of all USES of a value.  Pick the block with the least
1130 // loop nesting depth that is lowest in the dominator tree.
1131 extern const char must_clone[];
1132 void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) {
1133 #ifndef PRODUCT
1134   if (trace_opto_pipelining())
1135     tty->print("\n#---- schedule_late ----\n");
1136 #endif
1137 
1138   Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
1139   Node *self;
1140 
1141   // Walk over all the nodes from last to first
1142   while ((self = iter.next()) != NULL) {
1143     Block* early = get_block_for_node(self); // Earliest legal placement
1144 
1145     if (self->is_top()) {
1146       // Top node goes in bb #2 with other constants.
1147       // It must be special-cased, because it has no out edges.
1148       early->add_inst(self);
1149       continue;
1150     }
1151 
1152     // No uses, just terminate
1153     if (self->outcnt() == 0) {
1154       assert(self->is_MachProj(), "sanity");
1155       continue;                   // Must be a dead machine projection
1156     }
1157 
1158     // If node is pinned in the block, then no scheduling can be done.
1159     if( self->pinned() )          // Pinned in block?
1160       continue;
1161 
1162     MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
1163     if (mach) {
1164       switch (mach->ideal_Opcode()) {
1165       case Op_CreateEx:
1166         // Don't move exception creation
1167         early->add_inst(self);
1168         continue;
1169         break;
1170       case Op_CheckCastPP:
1171         // Don't move CheckCastPP nodes away from their input, if the input
1172         // is a rawptr (5071820).
1173         Node *def = self->in(1);
1174         if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
1175           early->add_inst(self);
1176 #ifdef ASSERT
1177           _raw_oops.push(def);
1178 #endif
1179           continue;
1180         }
1181         break;
1182       }
1183     }
1184 
1185     // Gather LCA of all uses
1186     Block *LCA = NULL;
1187     {
1188       for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
1189         // For all uses, find LCA
1190         Node* use = self->fast_out(i);
1191         LCA = raise_LCA_above_use(LCA, use, self, this);
1192       }
1193     }  // (Hide defs of imax, i from rest of block.)
1194 
1195     // Place temps in the block of their use.  This isn't a
1196     // requirement for correctness but it reduces useless
1197     // interference between temps and other nodes.
1198     if (mach != NULL && mach->is_MachTemp()) {
1199       map_node_to_block(self, LCA);
1200       LCA->add_inst(self);
1201       continue;
1202     }
1203 
1204     // Check if 'self' could be anti-dependent on memory
1205     if (self->needs_anti_dependence_check()) {
1206       // Hoist LCA above possible-defs and insert anti-dependences to
1207       // defs in new LCA block.
1208       LCA = insert_anti_dependences(LCA, self);
1209     }
1210 
1211     if (early->_dom_depth > LCA->_dom_depth) {
1212       // Somehow the LCA has moved above the earliest legal point.
1213       // (One way this can happen is via memory_early_block.)
1214       if (C->subsume_loads() == true && !C->failing()) {
1215         // Retry with subsume_loads == false
1216         // If this is the first failure, the sentinel string will "stick"
1217         // to the Compile object, and the C2Compiler will see it and retry.
1218         C->record_failure(C2Compiler::retry_no_subsuming_loads());
1219       } else {
1220         // Bailout without retry when (early->_dom_depth > LCA->_dom_depth)
1221         assert(false, "graph should be schedulable");
1222         C->record_method_not_compilable("late schedule failed: incorrect graph");
1223       }
1224       return;
1225     }
1226 
1227     // If there is no opportunity to hoist, then we're done.
1228     // In stress mode, try to hoist even the single operations.
1229     bool try_to_hoist = StressGCM || (LCA != early);
1230 
1231     // Must clone guys stay next to use; no hoisting allowed.
1232     // Also cannot hoist guys that alter memory or are otherwise not
1233     // allocatable (hoisting can make a value live longer, leading to
1234     // anti and output dependency problems which are normally resolved
1235     // by the register allocator giving everyone a different register).
1236     if (mach != NULL && must_clone[mach->ideal_Opcode()])
1237       try_to_hoist = false;
1238 
1239     Block* late = NULL;
1240     if (try_to_hoist) {
1241       // Now find the block with the least execution frequency.
1242       // Start at the latest schedule and work up to the earliest schedule
1243       // in the dominator tree.  Thus the Node will dominate all its uses.
1244       late = hoist_to_cheaper_block(LCA, early, self);
1245     } else {
1246       // Just use the LCA of the uses.
1247       late = LCA;
1248     }
1249 
1250     // Put the node into target block
1251     schedule_node_into_block(self, late);
1252 
1253 #ifdef ASSERT
1254     if (self->needs_anti_dependence_check()) {
1255       // since precedence edges are only inserted when we're sure they
1256       // are needed make sure that after placement in a block we don't
1257       // need any new precedence edges.
1258       verify_anti_dependences(late, self);
1259     }
1260 #endif
1261   } // Loop until all nodes have been visited
1262 
1263 } // end ScheduleLate
1264 
1265 //------------------------------GlobalCodeMotion-------------------------------
1266 void PhaseCFG::global_code_motion() {
1267   ResourceMark rm;
1268 
1269 #ifndef PRODUCT
1270   if (trace_opto_pipelining()) {
1271     tty->print("\n---- Start GlobalCodeMotion ----\n");
1272   }
1273 #endif
1274 
1275   // Initialize the node to block mapping for things on the proj_list
1276   for (uint i = 0; i < _matcher.number_of_projections(); i++) {
1277     unmap_node_from_block(_matcher.get_projection(i));
1278   }
1279 
1280   // Set the basic block for Nodes pinned into blocks
1281   Arena* arena = Thread::current()->resource_area();
1282   VectorSet visited(arena);
1283   schedule_pinned_nodes(visited);
1284 
1285   // Find the earliest Block any instruction can be placed in.  Some
1286   // instructions are pinned into Blocks.  Unpinned instructions can
1287   // appear in last block in which all their inputs occur.
1288   visited.Clear();
1289   Node_List stack(arena);
1290   // Pre-grow the list
1291   stack.map((C->live_nodes() >> 1) + 16, NULL);
1292   if (!schedule_early(visited, stack)) {
1293     // Bailout without retry
1294     C->record_method_not_compilable("early schedule failed");
1295     return;
1296   }
1297 
1298   // Build Def-Use edges.
1299   // Compute the latency information (via backwards walk) for all the
1300   // instructions in the graph
1301   _node_latency = new GrowableArray<uint>(); // resource_area allocation
1302 
1303   if (C->do_scheduling()) {
1304     compute_latencies_backwards(visited, stack);
1305   }
1306 
1307   // Now schedule all nodes as LATE as possible.  This is the LCA in the
1308   // dominator tree of all USES of a value.  Pick the block with the least
1309   // loop nesting depth that is lowest in the dominator tree.
1310   // ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() )
1311   schedule_late(visited, stack);
1312   if (C->failing()) {
1313     return;
1314   }
1315 
1316 #ifndef PRODUCT
1317   if (trace_opto_pipelining()) {
1318     tty->print("\n---- Detect implicit null checks ----\n");
1319   }
1320 #endif
1321 
1322   // Detect implicit-null-check opportunities.  Basically, find NULL checks
1323   // with suitable memory ops nearby.  Use the memory op to do the NULL check.
1324   // I can generate a memory op if there is not one nearby.
1325   if (C->is_method_compilation()) {
1326     // By reversing the loop direction we get a very minor gain on mpegaudio.
1327     // Feel free to revert to a forward loop for clarity.
1328     // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
1329     for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) {
1330       Node* proj = _matcher._null_check_tests[i];
1331       Node* val  = _matcher._null_check_tests[i + 1];
1332       Block* block = get_block_for_node(proj);
1333       implicit_null_check(block, proj, val, C->allowed_deopt_reasons());
1334       // The implicit_null_check will only perform the transformation
1335       // if the null branch is truly uncommon, *and* it leads to an
1336       // uncommon trap.  Combined with the too_many_traps guards
1337       // above, this prevents SEGV storms reported in 6366351,
1338       // by recompiling offending methods without this optimization.
1339     }
1340   }
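
       // Illustrative sketch (hypothetical code): for a test of the shape
       //   if (p == NULL) uncommon_trap(); else x = p->field;
       // implicit_null_check() can fold the explicit test into the load of
       // p->field itself; a NULL p then faults, the signal handler routes the
       // fault to the uncommon trap, and the hot path pays no explicit
       // compare-and-branch.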
1341 
1342 #ifndef PRODUCT
1343   if (trace_opto_pipelining()) {
1344     tty->print("\n---- Start Local Scheduling ----\n");
1345   }
1346 #endif
1347 
1348   // Schedule locally.  Right now a simple topological sort.
1349   // Later, do a real latency-aware scheduler.
1350   GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1);
1351   visited.Clear();
1352   for (uint i = 0; i < number_of_blocks(); i++) {
1353     Block* block = get_block(i);
1354     if (!schedule_local(block, ready_cnt, visited)) {
1355       if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
1356         C->record_method_not_compilable("local schedule failed");
1357       }
1358       return;
1359     }
1360   }
1361 
1362   // If we inserted any instructions between a Call and its CatchNode,
1363   // clone the instructions on all paths below the Catch.
1364   for (uint i = 0; i < number_of_blocks(); i++) {
1365     Block* block = get_block(i);
1366     call_catch_cleanup(block);
1367   }
1368 
1369 #ifndef PRODUCT
1370   if (trace_opto_pipelining()) {
1371     tty->print("\n---- After GlobalCodeMotion ----\n");
1372     for (uint i = 0; i < number_of_blocks(); i++) {
1373       Block* block = get_block(i);
1374       block->dump();
1375     }
1376   }
1377 #endif
1378   // Dead.
1379   _node_latency = (GrowableArray<uint> *)((intptr_t)0xdeadbeef);
1380 }
1381 
1382 bool PhaseCFG::do_global_code_motion() {
1383 
1384   build_dominator_tree();
1385   if (C->failing()) {
1386     return false;
1387   }
1388 
1389   NOT_PRODUCT( C->verify_graph_edges(); )
1390 
1391   estimate_block_frequency();
1392 
1393   global_code_motion();
1394 
1395   if (C->failing()) {
1396     return false;
1397   }
1398 
1399   return true;
1400 }
1401 
1402 //------------------------------Estimate_Block_Frequency-----------------------
1403 // Estimate block frequencies based on IfNode probabilities.
1404 void PhaseCFG::estimate_block_frequency() {
1405 
1406   // Force conditional branches leading to uncommon traps to be unlikely,
1407   // not because we get to the uncommon_trap with less relative frequency,
1408   // but because an uncommon_trap typically causes a deopt, so we only get
1409   // there once.
1410   if (C->do_freq_based_layout()) {
1411     Block_List worklist;
1412     Block* root_blk = get_block(0);
1413     for (uint i = 1; i < root_blk->num_preds(); i++) {
1414       Block *pb = get_block_for_node(root_blk->pred(i));
1415       if (pb->has_uncommon_code()) {
1416         worklist.push(pb);
1417       }
1418     }
1419     while (worklist.size() > 0) {
1420       Block* uct = worklist.pop();
1421       if (uct == get_root_block()) {
1422         continue;
1423       }
1424       for (uint i = 1; i < uct->num_preds(); i++) {
1425         Block *pb = get_block_for_node(uct->pred(i));
1426         if (pb->_num_succs == 1) {
1427           worklist.push(pb);
1428         } else if (pb->num_fall_throughs() == 2) {
1429           pb->update_uncommon_branch(uct);
1430         }
1431       }
1432     }
1433   }
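
       // Illustrative sketch (hypothetical blocks): if block B9 contains an
       // uncommon trap call, the walk above visits B9 and its single-successor
       // predecessors; any predecessor that ends in a two-way branch has the
       // edge leading toward the trap forced to a tiny probability by
       // update_uncommon_branch(), so the frequency propagation below treats
       // the trapping path as cold.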
1434 
1435   // Create the loop tree and calculate loop depth.
1436   _root_loop = create_loop_tree();
1437   _root_loop->compute_loop_depth(0);
1438 
1439   // Compute block frequency of each block, relative to a single loop entry.
1440   _root_loop->compute_freq();
1441 
1442   // Adjust all frequencies to be relative to a single method entry
1443   _root_loop->_freq = 1.0;
1444   _root_loop->scale_freq();
1445 
1446   // Save outermost loop frequency for LRG frequency threshold
1447   _outer_loop_frequency = _root_loop->outer_loop_freq();
1448 
1449   // Force paths ending at uncommon traps to be infrequent
1450   if (!C->do_freq_based_layout()) {
1451     Block_List worklist;
1452     Block* root_blk = get_block(0);
1453     for (uint i = 1; i < root_blk->num_preds(); i++) {
1454       Block *pb = get_block_for_node(root_blk->pred(i));
1455       if (pb->has_uncommon_code()) {
1456         worklist.push(pb);
1457       }
1458     }
1459     while (worklist.size() > 0) {
1460       Block* uct = worklist.pop();
1461       uct->_freq = PROB_MIN;
1462       for (uint i = 1; i < uct->num_preds(); i++) {
1463         Block *pb = get_block_for_node(uct->pred(i));
1464         if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
1465           worklist.push(pb);
1466         }
1467       }
1468     }
1469   }
1470 
1471 #ifdef ASSERT
1472   for (uint i = 0; i < number_of_blocks(); i++) {
1473     Block* b = get_block(i);
1474     assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency");
1475   }
1476 #endif
1477 
1478 #ifndef PRODUCT
1479   if (PrintCFGBlockFreq) {
1480     tty->print_cr("CFG Block Frequencies");
1481     _root_loop->dump_tree();
1482     if (Verbose) {
1483       tty->print_cr("PhaseCFG dump");
1484       dump();
1485       tty->print_cr("Node dump");
1486       _root->dump(99999);
1487     }
1488   }
1489 #endif
1490 }
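
     // Worked example (hypothetical numbers): with the method entry at
     // frequency 1.0, a loop whose estimated trip count is 10 gives its head
     // a scaled frequency of about 10.0, and a block executed on half of
     // those iterations ends up near 5.0.  The ASSERT loop above checks that
     // every block keeps at least MIN_BLOCK_FREQUENCY, which the register
     // allocator relies on.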
1491 
1492 //----------------------------create_loop_tree--------------------------------
1493 // Create a loop tree from the CFG
1494 CFGLoop* PhaseCFG::create_loop_tree() {
1495 
1496 #ifdef ASSERT
1497   assert(get_block(0) == get_root_block(), "first block should be root block");
1498   for (uint i = 0; i < number_of_blocks(); i++) {
1499     Block* block = get_block(i);
1500     // Check that the _loop fields are clear...we could clear them if not.
1501     assert(block->_loop == NULL, "clear _loop expected");
1502     // Sanity check that the RPO numbering is reflected in the _blocks array.
1503     // It doesn't have to be for the loop tree to be built, but if it is not,
1504     // then the blocks have been reordered since dom graph building...which
1505     // casts doubt on the RPO numbering.
1506     assert(block->_rpo == i, "unexpected reverse post order number");
1507   }
1508 #endif
1509 
1510   int idct = 0;
1511   CFGLoop* root_loop = new CFGLoop(idct++);
1512 
1513   Block_List worklist;
1514 
1515   // Assign blocks to loops
1516   for (uint i = number_of_blocks() - 1; i > 0; i--) { // skip Root block
1517     Block* block = get_block(i);
1518 
1519     if (block->head()->is_Loop()) {
1520       Block* loop_head = block;
1521       assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
1522       Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
1523       Block* tail = get_block_for_node(tail_n);
1524 
1525       // Defensively filter out Loop nodes for non-single-entry loops.
1526       // For all reasonable loops, the head occurs before the tail in RPO.
1527       if (i <= tail->_rpo) {
1528 
1529         // The tail and (recursive) predecessors of the tail
1530         // are made members of a new loop.
1531 
1532         assert(worklist.size() == 0, "nonempty worklist");
1533         CFGLoop* nloop = new CFGLoop(idct++);
1534         assert(loop_head->_loop == NULL, "just checking");
1535         loop_head->_loop = nloop;
1536         // Add to nloop so push_pred() will skip over inner loops
1537         nloop->add_member(loop_head);
1538         nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this);
1539 
1540         while (worklist.size() > 0) {
1541           Block* member = worklist.pop();
1542           if (member != loop_head) {
1543             for (uint j = 1; j < member->num_preds(); j++) {
1544               nloop->push_pred(member, j, worklist, this);
1545             }
1546           }
1547         }
1548       }
1549     }
1550   }
1551 
1552   // Create a member list for each loop consisting
1553   // of both blocks and (immediate child) loops.
1554   for (uint i = 0; i < number_of_blocks(); i++) {
1555     Block* block = get_block(i);
1556     CFGLoop* lp = block->_loop;
1557     if (lp == NULL) {
1558       // Not assigned to a loop. Add it to the method's pseudo loop.
1559       block->_loop = root_loop;
1560       lp = root_loop;
1561     }
1562     if (lp == root_loop || block != lp->head()) { // loop heads are already members
1563       lp->add_member(block);
1564     }
1565     if (lp != root_loop) {
1566       if (lp->parent() == NULL) {
1567         // Not a nested loop. Make it a child of the method's pseudo loop.
1568         root_loop->add_nested_loop(lp);
1569       }
1570       if (block == lp->head()) {
1571         // Add nested loop to member list of parent loop.
1572         lp->parent()->add_member(lp);
1573       }
1574     }
1575   }
1576 
1577   return root_loop;
1578 }
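
     // Illustrative sketch (hypothetical shape): for a method with an outer
     // loop containing one inner loop, the tree built above looks like
     //   root_loop (method pseudo loop)
     //     +-- outer CFGLoop:  members = outer blocks + the inner CFGLoop
     //           +-- inner CFGLoop:  members = inner blocks
     // Loop heads are members of their own loop, and every other block is a
     // member of the innermost loop containing it (or of root_loop).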
1579 
1580 //------------------------------push_pred--------------------------------------
1581 void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) {
1582   Node* pred_n = blk->pred(i);
1583   Block* pred = cfg->get_block_for_node(pred_n);
1584   CFGLoop *pred_loop = pred->_loop;
1585   if (pred_loop == NULL) {
1586     // Filter out blocks for non-single-entry loops.
1587     // For all reasonable loops, the head occurs before the tail in RPO.
1588     if (pred->_rpo > head()->_rpo) {
1589       pred->_loop = this;
1590       worklist.push(pred);
1591     }
1592   } else if (pred_loop != this) {
1593     // Nested loop.
1594     while (pred_loop->_parent != NULL && pred_loop->_parent != this) {
1595       pred_loop = pred_loop->_parent;
1596     }
1597     // Make pred's loop be a child
1598     if (pred_loop->_parent == NULL) {
1599       add_nested_loop(pred_loop);
1600       // Continue with loop entry predecessor.
1601       Block* pred_head = pred_loop->head();
1602       assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
1603       assert(pred_head != head(), "loop head in only one loop");
1604       push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
1605     } else {
1606       assert(pred_loop->_parent == this && _parent == NULL, "just checking");
1607     }
1608   }
1609 }
1610 
1611 //------------------------------add_nested_loop--------------------------------
1612 // Make cl a child of the current loop in the loop tree.
1613 void CFGLoop::add_nested_loop(CFGLoop* cl) {
1614   assert(_parent == NULL, "no parent yet");
1615   assert(cl != this, "not my own parent");
1616   cl->_parent = this;
1617   CFGLoop* ch = _child;
1618   if (ch == NULL) {
1619     _child = cl;
1620   } else {
1621     while (ch->_sibling != NULL) { ch = ch->_sibling; }
1622     ch->_sibling = cl;
1623   }
1624 }
1625 
1626 //------------------------------compute_loop_depth-----------------------------
1627 // Store the loop depth in each CFGLoop object.
1628 // Recursively walk the children to do the same for them.
1629 void CFGLoop::compute_loop_depth(int depth) {
1630   _depth = depth;
1631   CFGLoop* ch = _child;
1632   while (ch != NULL) {
1633     ch->compute_loop_depth(depth + 1);
1634     ch = ch->_sibling;
1635   }
1636 }
1637 
1638 //------------------------------compute_freq-----------------------------------
1639 // Compute the frequency of each block and loop, relative to a single entry
1640 // into the dominating loop head.
1641 void CFGLoop::compute_freq() {
1642   // Bottom up traversal of loop tree (visit inner loops first.)
1643   // Set loop head frequency to 1.0, then transitively
1644   // compute frequency for all successors in the loop,
1645   // as well as for each exit edge.  Inner loops are
1646   // treated as single blocks with loop exit targets
1647   // as the successor blocks.
1648 
1649   // Nested loops first
1650   CFGLoop* ch = _child;
1651   while (ch != NULL) {
1652     ch->compute_freq();
1653     ch = ch->_sibling;
1654   }
1655   assert(_members.length() > 0, "no empty loops");
1656   Block* hd = head();
1657   hd->_freq = 1.0f;
1658   for (int i = 0; i < _members.length(); i++) {
1659     CFGElement* s = _members.at(i);
1660     float freq = s->_freq;
1661     if (s->is_block()) {
1662       Block* b = s->as_Block();
1663       for (uint j = 0; j < b->_num_succs; j++) {
1664         Block* sb = b->_succs[j];
1665         update_succ_freq(sb, freq * b->succ_prob(j));
1666       }
1667     } else {
1668       CFGLoop* lp = s->as_CFGLoop();
1669       assert(lp->_parent == this, "immediate child");
1670       for (int k = 0; k < lp->_exits.length(); k++) {
1671         Block* eb = lp->_exits.at(k).get_target();
1672         float prob = lp->_exits.at(k).get_prob();
1673         update_succ_freq(eb, freq * prob);
1674       }
1675     }
1676   }
1677 
1678   // For all loops other than the outer, "method" loop,
1679   // sum and normalize the exit probability. The "method" loop
1680   // should keep the initial exit probability of 1, so that
1681   // inner blocks do not get erroneously scaled.
1682   if (_depth != 0) {
1683     // Total the exit probabilities for this loop.
1684     float exits_sum = 0.0f;
1685     for (int i = 0; i < _exits.length(); i++) {
1686       exits_sum += _exits.at(i).get_prob();
1687     }
1688 
1689     // Normalize the exit probabilities. Until now, the
1690     // probabilities estimate the likelihood of an exit on
1691     // a single loop iteration; afterward, they estimate
1692     // the probability of exit per loop entry.
1693     for (int i = 0; i < _exits.length(); i++) {
1694       Block* et = _exits.at(i).get_target();
1695       float new_prob = 0.0f;
1696       if (_exits.at(i).get_prob() > 0.0f) {
1697         new_prob = _exits.at(i).get_prob() / exits_sum;
1698       }
1699       BlockProbPair bpp(et, new_prob);
1700       _exits.at_put(i, bpp);
1701     }
1702 
1703     // Save the total, but guard against unreasonable probability,
1704     // as the value is used to estimate the loop trip count.
1705     // An infinite trip count would blur relative block
1706     // frequencies.
1707     if (exits_sum > 1.0f) exits_sum = 1.0f;
1708     if (exits_sum < PROB_MIN) exits_sum = PROB_MIN;
1709     _exit_prob = exits_sum;
1710   }
1711 }
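
     // Worked example (hypothetical numbers): a loop that recorded two exits
     // with per-iteration probabilities 0.01 and 0.04 gets exits_sum = 0.05,
     // so the normalized per-entry exit probabilities become 0.2 and 0.8 and
     // _exit_prob is saved as 0.05.  The trip count estimate derived from
     // _exit_prob (roughly its reciprocal, ~20 here) is why the clamps above
     // keep the sum within [PROB_MIN, 1.0].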
1712 
1713 //------------------------------succ_prob-------------------------------------
1714 // Determine the probability of reaching successor 'i' from the receiver block.
1715 float Block::succ_prob(uint i) {
1716   int eidx = end_idx();
1717   Node *n = get_node(eidx);  // Get ending Node
1718 
1719   int op = n->Opcode();
1720   if (n->is_Mach()) {
1721     if (n->is_MachNullCheck()) {
1722       // Can only reach here if called after lcm. The original Op_If is gone,
1723       // so we attempt to infer the probability from one or both of the
1724       // successor blocks.
1725       assert(_num_succs == 2, "expecting 2 successors of a null check");
1726       // If either successor has only one predecessor, then the
1727       // probability estimate can be derived using the
1728       // relative frequency of the successor and this block.
1729       if (_succs[i]->num_preds() == 2) {
1730         return _succs[i]->_freq / _freq;
1731       } else if (_succs[1-i]->num_preds() == 2) {
1732         return 1 - (_succs[1-i]->_freq / _freq);
1733       } else {
1734         // Estimate using both successor frequencies
1735         float freq = _succs[i]->_freq;
1736         return freq / (freq + _succs[1-i]->_freq);
1737       }
1738     }
1739     op = n->as_Mach()->ideal_Opcode();
1740   }
1741 
1742 
1743   // Switch on branch type
1744   switch( op ) {
1745   case Op_CountedLoopEnd:
1746   case Op_If: {
1747     assert (i < 2, "just checking");
1748     // Conditionals pass on only part of their frequency
1749     float prob  = n->as_MachIf()->_prob;
1750     assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
1751     // If succ[i] is the FALSE branch, invert path info
1752     if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) {
1753       return 1.0f - prob; // not taken
1754     } else {
1755       return prob; // taken
1756     }
1757   }
1758 
1759   case Op_Jump:
1760     // Divide the frequency between all successors evenly
1761     return 1.0f/_num_succs;
1762 
1763   case Op_Catch: {
1764     const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
1765     if (ci->_con == CatchProjNode::fall_through_index) {
1766       // Fall-thru path gets the lion's share.
1767       return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
1768     } else {
1769       // Presume exceptional paths are equally unlikely
1770       return PROB_UNLIKELY_MAG(5);
1771     }
1772   }
1773 
1774   case Op_Root:
1775   case Op_Goto:
1776     // Pass frequency straight thru to target
1777     return 1.0f;
1778 
1779   case Op_NeverBranch:
1780     return 0.0f;
1781 
1782   case Op_TailCall:
1783   case Op_TailJump:
1784   case Op_Return:
1785   case Op_Halt:
1786   case Op_Rethrow:
1787     // Do not push out freq to root block
1788     return 0.0f;
1789 
1790   default:
1791     ShouldNotReachHere();
1792   }
1793 
1794   return 0.0f;
1795 }
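
     // Illustrative sketch (hypothetical numbers): for a MachNullCheck block
     // with frequency 100 whose successor on the non-null path has this block
     // as its only predecessor and frequency 99, the inferred probability of
     // taking that successor is 99/100 = 0.99, leaving 0.01 for the null
     // path.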
1796 
1797 //------------------------------num_fall_throughs-----------------------------
1798 // Return the number of fall-through candidates for a block
1799 int Block::num_fall_throughs() {
1800   int eidx = end_idx();
1801   Node *n = get_node(eidx);  // Get ending Node
1802 
1803   int op = n->Opcode();
1804   if (n->is_Mach()) {
1805     if (n->is_MachNullCheck()) {
1806       // In theory, either side can fall through; for simplicity's sake,
1807       // let's say only the false branch can for now.
1808       return 1;
1809     }
1810     op = n->as_Mach()->ideal_Opcode();
1811   }
1812 
1813   // Switch on branch type
1814   switch( op ) {
1815   case Op_CountedLoopEnd:
1816   case Op_If:
1817     return 2;
1818 
1819   case Op_Root:
1820   case Op_Goto:
1821     return 1;
1822 
1823   case Op_Catch: {
1824     for (uint i = 0; i < _num_succs; i++) {
1825       const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
1826       if (ci->_con == CatchProjNode::fall_through_index) {
1827         return 1;
1828       }
1829     }
1830     return 0;
1831   }
1832 
1833   case Op_Jump:
1834   case Op_NeverBranch:
1835   case Op_TailCall:
1836   case Op_TailJump:
1837   case Op_Return:
1838   case Op_Halt:
1839   case Op_Rethrow:
1840     return 0;
1841 
1842   default:
1843     ShouldNotReachHere();
1844   }
1845 
1846   return 0;
1847 }
1848 
1849 //------------------------------succ_fall_through-----------------------------
1850 // Return true if a specific successor could be fall-through target.
1851 bool Block::succ_fall_through(uint i) {
1852   int eidx = end_idx();
1853   Node *n = get_node(eidx);  // Get ending Node
1854 
1855   int op = n->Opcode();
1856   if (n->is_Mach()) {
1857     if (n->is_MachNullCheck()) {
1858       // In theory, either side can fall through; for simplicity's sake,
1859       // let's say only the false branch can for now.
1860       return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
1861     }
1862     op = n->as_Mach()->ideal_Opcode();
1863   }
1864 
1865   // Switch on branch type
1866   switch( op ) {
1867   case Op_CountedLoopEnd:
1868   case Op_If:
1869   case Op_Root:
1870   case Op_Goto:
1871     return true;
1872 
1873   case Op_Catch: {
1874     const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
1875     return ci->_con == CatchProjNode::fall_through_index;
1876   }
1877 
1878   case Op_Jump:
1879   case Op_NeverBranch:
1880   case Op_TailCall:
1881   case Op_TailJump:
1882   case Op_Return:
1883   case Op_Halt:
1884   case Op_Rethrow:
1885     return false;
1886 
1887   default:
1888     ShouldNotReachHere();
1889   }
1890 
1891   return false;
1892 }
1893 
1894 //------------------------------update_uncommon_branch------------------------
1895 // Update the probability of a two-branch to be uncommon
1896 void Block::update_uncommon_branch(Block* ub) {
1897   int eidx = end_idx();
1898   Node *n = get_node(eidx);  // Get ending Node
1899 
1900   int op = n->as_Mach()->ideal_Opcode();
1901 
1902   assert(op == Op_CountedLoopEnd || op == Op_If, "must be an If");
1903   assert(num_fall_throughs() == 2, "must be a two way branch block");
1904 
1905   // Which successor is ub?
1906   uint s;
1907   for (s = 0; s <_num_succs; s++) {
1908     if (_succs[s] == ub) break;
1909   }
1910   assert(s < 2, "uncommon successor must be found");
1911 
1912   // If ub is on the true path, make the probability small; else
1913   // ub is on the false path, so make the probability large.
1914   bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);
1915 
1916   // Get existing probability
1917   float p = n->as_MachIf()->_prob;
1918 
1919   if (invert) p = 1.0 - p;
1920   if (p > PROB_MIN) {
1921     p = PROB_MIN;
1922   }
1923   if (invert) p = 1.0 - p;
1924 
1925   n->as_MachIf()->_prob = p;
1926 }
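
     // Illustrative sketch (hypothetical numbers): if the uncommon successor
     // ub sits on the taken (IfTrue) side of a branch whose current _prob is
     // 0.3, the code above lowers _prob to PROB_MIN; if ub is instead reached
     // through the IfFalse projection, the clamp is applied to the not-taken
     // side and _prob becomes 1.0 - PROB_MIN.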
1927 
1928 //------------------------------update_succ_freq-------------------------------
1929 // Update the appropriate frequency associated with block 'b', a successor of
1930 // a block in this loop.
1931 void CFGLoop::update_succ_freq(Block* b, float freq) {
1932   if (b->_loop == this) {
1933     if (b == head()) {
1934       // Back branch within the loop.
1935       // Do nothing now; the loop-carried frequency will be
1936       // adjusted later in scale_freq().
1937     } else {
1938       // simple branch within the loop
1939       b->_freq += freq;
1940     }
1941   } else if (!in_loop_nest(b)) {
1942     // branch is exit from this loop
1943     BlockProbPair bpp(b, freq);
1944     _exits.append(bpp);
1945   } else {
1946     // branch into nested loop
1947     CFGLoop* ch = b->_loop;
1948     ch->_freq += freq;
1949   }
1950 }
1951 
1952 //------------------------------in_loop_nest-----------------------------------
1953 // Determine if block b is in the receiver's loop nest.
1954 bool CFGLoop::in_loop_nest(Block* b) {
1955   int depth = _depth;
1956   CFGLoop* b_loop = b->_loop;
1957   int b_depth = b_loop->_depth;
1958   if (depth == b_depth) {
1959     return true;
1960   }
1961   while (b_depth > depth) {
1962     b_loop = b_loop->_parent;
1963     b_depth = b_loop->_depth;
1964   }
1965   return b_loop == this;
1966 }
1967 
1968 //------------------------------scale_freq-------------------------------------
1969 // Scale frequency of loops and blocks by trip counts from outer loops
1970 // Do a top down traversal of loop tree (visit outer loops first.)
1971 void CFGLoop::scale_freq() {
1972   float loop_freq = _freq * trip_count();
1973   _freq = loop_freq;
1974   for (int i = 0; i < _members.length(); i++) {
1975     CFGElement* s = _members.at(i);
1976     float block_freq = s->_freq * loop_freq;
1977     if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
1978       block_freq = MIN_BLOCK_FREQUENCY;
1979     s->_freq = block_freq;
1980   }
1981   CFGLoop* ch = _child;
1982   while (ch != NULL) {
1983     ch->scale_freq();
1984     ch = ch->_sibling;
1985   }
1986 }
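
     // Worked example (hypothetical numbers): a loop entered with _freq 1.0
     // and an estimated trip count of 10 is rescaled to 10.0; a member block
     // whose frequency relative to the loop head is 0.5 then receives an
     // absolute frequency of 5.0.  NaN or vanishingly small results are
     // clamped to MIN_BLOCK_FREQUENCY.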
1987 
1988 // Frequency of outer loop
1989 float CFGLoop::outer_loop_freq() const {
1990   if (_child != NULL) {
1991     return _child->_freq;
1992   }
1993   return _freq;
1994 }
1995 
1996 #ifndef PRODUCT
1997 //------------------------------dump_tree--------------------------------------
1998 void CFGLoop::dump_tree() const {
1999   dump();
2000   if (_child != NULL)   _child->dump_tree();
2001   if (_sibling != NULL) _sibling->dump_tree();
2002 }
2003 
2004 //------------------------------dump-------------------------------------------
2005 void CFGLoop::dump() const {
2006   for (int i = 0; i < _depth; i++) tty->print("   ");
2007   tty->print("%s: %d  trip_count: %6.0f freq: %6.0f\n",
2008              _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq);
2009   for (int i = 0; i < _depth; i++) tty->print("   ");
2010   tty->print("         members:");
2011   int k = 0;
2012   for (int i = 0; i < _members.length(); i++) {
2013     if (k++ >= 6) {
2014       tty->print("\n              ");
2015       for (int j = 0; j < _depth+1; j++) tty->print("   ");
2016       k = 0;
2017     }
2018     CFGElement *s = _members.at(i);
2019     if (s->is_block()) {
2020       Block *b = s->as_Block();
2021       tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq);
2022     } else {
2023       CFGLoop* lp = s->as_CFGLoop();
2024       tty->print(" L%d(%6.3f)", lp->_id, lp->_freq);
2025     }
2026   }
2027   tty->print("\n");
2028   for (int i = 0; i < _depth; i++) tty->print("   ");
2029   tty->print("         exits:  ");
2030   k = 0;
2031   for (int i = 0; i < _exits.length(); i++) {
2032     if (k++ >= 7) {
2033       tty->print("\n              ");
2034       for (int j = 0; j < _depth+1; j++) tty->print("   ");
2035       k = 0;
2036     }
2037     Block *blk = _exits.at(i).get_target();
2038     float prob = _exits.at(i).get_prob();
2039     tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
2040   }
2041   tty->print("\n");
2042 }
2043 #endif