/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/machnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/deoptimization.hpp"
#ifdef TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_arm
# include "adfiles/ad_arm.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_ppc
# include "adfiles/ad_ppc.hpp"
#endif

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

// To avoid float value underflow
#define MIN_BLOCK_FREQUENCY 1.e-35f
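// (1.e-35f is still well above the smallest normalized single-precision
// value, FLT_MIN ~= 1.18e-38f, so a clamped frequency stays normalized and
// short products of clamped frequencies do not immediately flush to zero.)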

//----------------------------schedule_node_into_block-------------------------
// Insert node n into block b. Look for projections of n and make sure they
// are in b also.
void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
  // Set the basic block of n and add n to b.
  _bbs.map(n->_idx, b);
  b->add_inst(n);

  // After Matching, nearly any old Node may have projections trailing it.
  // These are usually machine-dependent flags.  In any case, they might
  // float to another block below this one.  Move them up.
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node*  use  = n->fast_out(i);
    if (use->is_Proj()) {
      Block* buse = _bbs[use->_idx];
      if (buse != b) {              // In wrong block?
        if (buse != NULL)
          buse->find_remove(use);   // Remove from wrong block
        _bbs.map(use->_idx, b);     // Re-insert in this block
        b->add_inst(use);
      }
    }
  }
}

//----------------------------replace_block_proj_ctrl-------------------------
// Nodes that have is_block_proj() nodes as their control need to use
// the appropriate Region for their actual block as their control since
// the projection will be in a predecessor block.
void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
  const Node *in0 = n->in(0);
  assert(in0 != NULL, "Only control-dependent");
  const Node *p = in0->is_block_proj();
  if (p != NULL && p != n) {    // Control from a block projection?
    assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
    // Find trailing Region
    Block *pb = _bbs[in0->_idx]; // Block-projection already has basic block
    uint j = 0;
    if (pb->_num_succs != 1) {  // More than 1 successor?
      // Search for successor
      uint max = pb->_nodes.size();
      assert( max > 1, "" );
      uint start = max - pb->_num_succs;
      // Find which output path belongs to projection
      for (j = start; j < max; j++) {
        if( pb->_nodes[j] == in0 )
          break;
      }
      assert( j < max, "must find" );
      // Change control to match head of successor basic block
      j -= start;
    }
    n->set_req(0, pb->_succs[j]->head());
  }
}
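
// A sketch of the rewiring above (node names are illustrative only):
//
//      pb:  [ ... If ]
//            /      \
//       IfTrue    IfFalse       <- is_block_proj() nodes, the last
//          |          |            _num_succs entries of pb->_nodes
//      [ Region ] [ Region ]    <- heads of pb's successor blocks
//
// A node n with in(0) == IfTrue gets the matching successor's Region as
// its new control, so n is tied to the block it will actually live in
// rather than to a projection sitting in the predecessor block.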


//------------------------------schedule_pinned_nodes--------------------------
// Set the basic block for Nodes pinned into blocks
void PhaseCFG::schedule_pinned_nodes( VectorSet &visited ) {
  // Allocate node stack of size C->unique()+8 to avoid frequent realloc
  GrowableArray <Node *> spstack(C->unique()+8);
  spstack.push(_root);
  while ( spstack.is_nonempty() ) {
    Node *n = spstack.pop();
    if( !visited.test_set(n->_idx) ) { // Test node and flag it as visited
      if( n->pinned() && !_bbs.lookup(n->_idx) ) {  // Pinned?  Nail it down!
        assert( n->in(0), "pinned Node must have Control" );
        // Before setting block replace block_proj control edge
        replace_block_proj_ctrl(n);
        Node *input = n->in(0);
        while( !input->is_block_start() )
          input = input->in(0);
        Block *b = _bbs[input->_idx];  // Basic block of controlling input
        schedule_node_into_block(n, b);
      }
      for( int i = n->req() - 1; i >= 0; --i ) {  // For all inputs
        if( n->in(i) != NULL )
          spstack.push(n->in(i));
      }
    }
  }
}

#ifdef ASSERT
// Assert that new input b2 is dominated by all previous inputs.
// Check this by seeing that it is dominated by b1, the deepest
// input observed before b2.
static void assert_dom(Block* b1, Block* b2, Node* n, Block_Array &bbs) {
  if (b1 == NULL)  return;
  assert(b1->_dom_depth < b2->_dom_depth, "sanity");
  Block* tmp = b2;
  while (tmp != b1 && tmp != NULL) {
    tmp = tmp->_idom;
  }
  if (tmp != b1) {
    // Detected an unschedulable graph.  Print some nice stuff and die.
    tty->print_cr("!!! Unschedulable graph !!!");
    for (uint j=0; j<n->len(); j++) { // For all inputs
      Node* inn = n->in(j); // Get input
      if (inn == NULL)  continue;  // Ignore NULL, missing inputs
      Block* inb = bbs[inn->_idx];
      tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
                 inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
      inn->dump();
    }
    tty->print("Failing node: ");
    n->dump();
    assert(false, "unschedulable graph");
  }
}
#endif

static Block* find_deepest_input(Node* n, Block_Array &bbs) {
  // Find the last input dominated by all other inputs.
  Block* deepb           = NULL;        // Deepest block so far
  int    deepb_dom_depth = 0;
  for (uint k = 0; k < n->len(); k++) { // For all inputs
    Node* inn = n->in(k);               // Get input
    if (inn == NULL)  continue;         // Ignore NULL, missing inputs
    Block* inb = bbs[inn->_idx];
    assert(inb != NULL, "must already have scheduled this input");
    if (deepb_dom_depth < (int) inb->_dom_depth) {
      // The new inb must be dominated by the previous deepb.
      // The various inputs must be linearly ordered in the dom
      // tree, or else there will not be a unique deepest block.
      DEBUG_ONLY(assert_dom(deepb, inb, n, bbs));
      deepb = inb;                      // Save deepest block
      deepb_dom_depth = deepb->_dom_depth;
    }
  }
  assert(deepb != NULL, "must be at least one input to n");
  return deepb;
}
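
// For example (depths are illustrative): if a node's two inputs live in
// blocks of dom_depth 2 and 4, and the depth-4 block is dominated by the
// depth-2 block, the node's earliest legal home is the depth-4 block,
// where both inputs are available.  If the input blocks were *not*
// linearly ordered in the dominator tree there would be no unique deepest
// block, which is exactly what assert_dom checks in debug builds.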


//------------------------------schedule_early---------------------------------
// Find the earliest Block any instruction can be placed in.  Some instructions
// are pinned into Blocks.  Unpinned instructions can appear in the last block
// in which all their inputs occur.
bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) {
  // Allocate stack with enough space to avoid frequent realloc
  Node_Stack nstack(roots.Size() + 8); // (unique >> 1) + 24 from Java2D stats
  // roots.push(_root); _root will be processed among C->top() inputs
  roots.push(C->top());
  visited.set(C->top()->_idx);

  while (roots.size() != 0) {
    // Use local variables nstack_top_n & nstack_top_i to cache values
    // on stack's top.
    Node *nstack_top_n = roots.pop();
    uint  nstack_top_i = 0;
//while_nstack_nonempty:
    while (true) {
      // Get parent node and next input's index from stack's top.
      Node *n = nstack_top_n;
      uint  i = nstack_top_i;

      if (i == 0) {
        // Fixup some control.  Constants without control get attached
        // to root and nodes that use is_block_proj() nodes should be attached
        // to the region that starts their block.
        const Node *in0 = n->in(0);
        if (in0 != NULL) {              // Control-dependent?
          replace_block_proj_ctrl(n);
        } else {               // n->in(0) == NULL
          if (n->req() == 1) { // This guy is a constant with NO inputs?
            n->set_req(0, _root);
          }
        }
      }

      // First, visit all inputs and force them to get a block.  If an
      // input is already in a block we quit following inputs (to avoid
      // cycles). Instead we put that Node on a worklist to be handled
      // later (since ITS inputs may not have a block yet).
      bool done = true;              // Assume all n's inputs will be processed
      while (i < n->len()) {         // For all inputs
        Node *in = n->in(i);         // Get input
        ++i;
        if (in == NULL) continue;    // Ignore NULL, missing inputs
        int is_visited = visited.test_set(in->_idx);
        if (!_bbs.lookup(in->_idx)) { // Missing block selection?
          if (is_visited) {
            // assert( !visited.test(in->_idx), "did not schedule early" );
            return false;
          }
          nstack.push(n, i);         // Save parent node and next input's index.
          nstack_top_n = in;         // Process current input now.
          nstack_top_i = 0;
          done = false;              // Not all n's inputs processed.
          break; // continue while_nstack_nonempty;
        } else if (!is_visited) {    // Input not yet visited?
          roots.push(in);            // Visit this guy later, using worklist
        }
      }
      if (done) {
        // All of n's inputs have been processed, complete post-processing.

        // Some instructions are pinned into a block.  These include Region,
        // Phi, Start, Return, and other control-dependent instructions and
        // any projections which depend on them.
        if (!n->pinned()) {
          // Set earliest legal block.
          _bbs.map(n->_idx, find_deepest_input(n, _bbs));
        } else {
          assert(_bbs[n->_idx] == _bbs[n->in(0)->_idx], "Pinned Node should be at the same block as its control edge");
        }

        if (nstack.is_empty()) {
          // Finished all nodes on stack.
          // Process next node on the worklist 'roots'.
          break;
        }
        // Get saved parent node and next input's index.
        nstack_top_n = nstack.node();
        nstack_top_i = nstack.index();
        nstack.pop();
      } //    if (done)
    }   // while (true)
  }     // while (roots.size() != 0)
  return true;
}

//------------------------------dom_lca----------------------------------------
// Find least common ancestor in dominator tree
// LCA is a current notion of LCA, to be raised above 'this'.
// As a convenient boundary condition, return 'this' if LCA is NULL.
// Find the LCA of those two nodes.
Block* Block::dom_lca(Block* LCA) {
  if (LCA == NULL || LCA == this)  return this;

  Block* anc = this;
  while (anc->_dom_depth > LCA->_dom_depth)
    anc = anc->_idom;           // Walk up till anc is as high as LCA

  while (LCA->_dom_depth > anc->_dom_depth)
    LCA = LCA->_idom;           // Walk up till LCA is as high as anc

  while (LCA != anc) {          // Walk both up till they are the same
    LCA = LCA->_idom;
    anc = anc->_idom;
  }

  return LCA;
}
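
// Worked example of the two-pointer walk above (depths are illustrative):
// given the dom tree A(depth 1) -> B(2) -> D(3) and A(1) -> C(2),
// D->dom_lca(C) first lifts D to depth 2 (via D->_idom == B), then steps B
// and C up in lock-step until both reach A, which is returned.  Each step
// only follows _idom, so the walk costs O(dom_depth).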

//--------------------------raise_LCA_above_use--------------------------------
// We are placing a definition, and have been given a def->use edge.
// The definition must dominate the use, so move the LCA upward in the
// dominator tree to dominate the use.  If the use is a phi, adjust
// the LCA only with the phi input paths which actually use this def.
static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, Block_Array &bbs) {
  Block* buse = bbs[use->_idx];
  if (buse == NULL)    return LCA;   // Unused killing Projs have no use block
  if (!use->is_Phi())  return buse->dom_lca(LCA);
  uint pmax = use->req();       // Number of Phi inputs
  // Why doesn't this loop just break after finding the matching input to
  // the Phi?  Well...it's like this.  I do not have true def-use/use-def
  // chains.  Means I cannot distinguish, from the def-use direction, which
  // of many use-defs lead from the same use to the same def.  That is, this
  // Phi might have several uses of the same def.  Each use appears in a
  // different predecessor block.  But when I enter here, I cannot distinguish
  // which use-def edge I should find the predecessor block for.  So I find
  // them all.  Means I do a little extra work if a Phi uses the same value
  // more than once.
  for (uint j=1; j<pmax; j++) { // For all inputs
    if (use->in(j) == def) {    // Found matching input?
      Block* pred = bbs[buse->pred(j)->_idx];
      LCA = pred->dom_lca(LCA);
    }
  }
  return LCA;
}
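
// For instance (shape is made up for illustration): if a Phi in block B
// consumes def only on input path 2, the LCA is raised with B's
// predecessor on path 2, not with B itself.  The def merely has to reach
// the end of that predecessor block; it does not have to dominate B.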

//----------------------------raise_LCA_above_marks----------------------------
// Return a new LCA that dominates LCA and any of its marked predecessors.
// Search all my parents up to 'early' (exclusive), looking for predecessors
// which are marked with the given index.  Return the LCA (in the dom tree)
// of all marked blocks.  If there are none marked, return the original
// LCA.
static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark,
                                    Block* early, Block_Array &bbs) {
  Block_List worklist;
  worklist.push(LCA);
  while (worklist.size() > 0) {
    Block* mid = worklist.pop();
    if (mid == early)  continue;  // stop searching here

    // Test and set the visited bit.
    if (mid->raise_LCA_visited() == mark)  continue;  // already visited

    // Don't process the current LCA, otherwise the search may terminate early
    if (mid != LCA && mid->raise_LCA_mark() == mark) {
      // Raise the LCA.
      LCA = mid->dom_lca(LCA);
      if (LCA == early)  break;   // stop searching everywhere
      assert(early->dominates(LCA), "early is high enough");
      // Resume searching at that point, skipping intermediate levels.
      worklist.push(LCA);
      if (LCA == mid)
        continue; // Don't mark as visited to avoid early termination.
    } else {
      // Keep searching through this block's predecessors.
      for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
        Block* mid_parent = bbs[ mid->pred(j)->_idx ];
        worklist.push(mid_parent);
      }
    }
    mid->set_raise_LCA_visited(mark);
  }
  return LCA;
}
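
// The search above is a backwards flood-fill from LCA toward 'early':
// unmarked blocks expand to their CFG predecessors, while a marked block
// folds itself into the answer via dom_lca and restarts the walk from the
// (possibly higher) result.  Marks and visited bits are compared against
// the caller's node index, so no per-query clearing of block state is
// needed between loads.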

//--------------------------memory_early_block--------------------------------
// This is a variation of find_deepest_input, the heart of schedule_early.
// Find the "early" block for a load, if we considered only memory and
// address inputs, that is, if other data inputs were ignored.
//
// Because a subset of edges are considered, the resulting block will
// be earlier (at a shallower dom_depth) than the true schedule_early
// point of the node. We compute this earlier block as a more permissive
// site for anti-dependency insertion, but only if subsume_loads is enabled.
static Block* memory_early_block(Node* load, Block* early, Block_Array &bbs) {
  Node* base;
  Node* index;
  Node* store = load->in(MemNode::Memory);
  load->as_Mach()->memory_inputs(base, index);

  assert(base != NodeSentinel && index != NodeSentinel,
         "unexpected base/index inputs");

  Node* mem_inputs[4];
  int mem_inputs_length = 0;
  if (base != NULL)  mem_inputs[mem_inputs_length++] = base;
  if (index != NULL) mem_inputs[mem_inputs_length++] = index;
  if (store != NULL) mem_inputs[mem_inputs_length++] = store;

  // In the comparison below, add one to account for the control input,
  // which may be null, but always takes up a spot in the in array.
  if (mem_inputs_length + 1 < (int) load->req()) {
    // This "load" has more inputs than just the memory, base and index inputs.
    // For purposes of checking anti-dependences, we need to start
    // from the early block of only the address portion of the instruction,
    // and ignore other blocks that may have factored into the wider
    // schedule_early calculation.
    if (load->in(0) != NULL) mem_inputs[mem_inputs_length++] = load->in(0);

    Block* deepb           = NULL;        // Deepest block so far
    int    deepb_dom_depth = 0;
    for (int i = 0; i < mem_inputs_length; i++) {
      Block* inb = bbs[mem_inputs[i]->_idx];
      if (deepb_dom_depth < (int) inb->_dom_depth) {
        // The new inb must be dominated by the previous deepb.
        // The various inputs must be linearly ordered in the dom
        // tree, or else there will not be a unique deepest block.
        DEBUG_ONLY(assert_dom(deepb, inb, load, bbs));
        deepb = inb;                      // Save deepest block
        deepb_dom_depth = deepb->_dom_depth;
      }
    }
    early = deepb;
  }

  return early;
}
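
// Example of why this matters (hypothetical shape): with subsume_loads, a
// memory operand can be folded into another instruction, e.g. an add that
// reads [base+index] directly.  The folded node's extra data input may be
// defined much deeper in the dom tree than the address inputs, so the
// memory-and-address-only "early" block computed here is shallower, and
// the anti-dependence search covers every store the folded load could be
// moved past.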

//--------------------------insert_anti_dependences---------------------------
// A load may need to witness memory that nearby stores can overwrite.
// For each nearby store, either insert an "anti-dependence" edge
// from the load to the store, or else move LCA upward to force the
// load to (eventually) be scheduled in a block above the store.
//
// Do not add edges to stores on distinct control-flow paths;
// only add edges to stores which might interfere.
//
// Return the (updated) LCA.  There will not be any possibly interfering
// store between the load's "early block" and the updated LCA.
// Any stores in the updated LCA will have new precedence edges
// back to the load.  The caller is expected to schedule the load
// in the LCA, in which case the precedence edges will make LCM
// preserve anti-dependences.  The caller may also hoist the load
// above the LCA, if it is not the early block.
Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
  assert(load->needs_anti_dependence_check(), "must be a load of some sort");
  assert(LCA != NULL, "");
  DEBUG_ONLY(Block* LCA_orig = LCA);

  // Compute the alias index.  Loads and stores with different alias indices
  // do not need anti-dependence edges.
  uint load_alias_idx = C->get_alias_index(load->adr_type());
#ifdef ASSERT
  if (load_alias_idx == Compile::AliasIdxBot && C->AliasLevel() > 0 &&
      (PrintOpto || VerifyAliases ||
       PrintMiscellaneous && (WizardMode || Verbose))) {
    // Load nodes should not consume all of memory.
    // Reporting a bottom type indicates a bug in adlc.
    // If some particular type of node validly consumes all of memory,
    // sharpen the preceding "if" to exclude it, so we can catch bugs here.
    tty->print_cr("*** Possible Anti-Dependence Bug:  Load consumes all of memory.");
    load->dump(2);
    if (VerifyAliases)  assert(load_alias_idx != Compile::AliasIdxBot, "");
  }
#endif
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrComp),
         "String compare is the only known 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrEquals),
         "String equals is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrIndexOf),
         "String indexOf is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_AryEq),
         "Arrays equals is a 'load' that does not conflict with any stores");

  if (!C->alias_type(load_alias_idx)->is_rewritable()) {
    // It is impossible to spoil this load by putting stores before it,
    // because we know that the stores will never update the value
    // which 'load' must witness.
    return LCA;
  }

  node_idx_t load_index = load->_idx;

  // Note the earliest legal placement of 'load', as determined
  // by the unique point in the dom tree where all memory effects
  // and other inputs are first available.  (Computed by schedule_early.)
  // For normal loads, 'early' is the shallowest place (dom graph wise)
  // to look for anti-deps between this load and any store.
  Block* early = _bbs[load_index];

  // If we are subsuming loads, compute an "early" block that only considers
  // memory or address inputs. This block may be different from the
  // schedule_early block in that it could be at an even shallower depth in the
  // dominator tree, and allow for a broader discovery of anti-dependences.
  if (C->subsume_loads()) {
    early = memory_early_block(load, early, _bbs);
  }

  ResourceArea *area = Thread::current()->resource_area();
  Node_List worklist_mem(area);     // prior memory state to store
  Node_List worklist_store(area);   // possible-def to explore
  Node_List worklist_visited(area); // visited mergemem nodes
  Node_List non_early_stores(area); // all relevant stores outside of early
  bool must_raise_LCA = false;

#ifdef TRACK_PHI_INPUTS
  // %%% This extra checking fails because MergeMem nodes are not GVNed.
  // Provide "phi_inputs" to check if every input to a PhiNode is from the
  // original memory state.  This indicates a PhiNode which should not
  // prevent the load from sinking.  For such a block, set_raise_LCA_mark
  // may be overly conservative.
  // Mechanism: count inputs seen for each Phi encountered in worklist_store.
  DEBUG_ONLY(GrowableArray<uint> phi_inputs(area, C->unique(),0,0));
#endif

  // 'load' uses some memory state; look for users of the same state.
  // Recurse through MergeMem nodes to the stores that use them.

  // Each of these stores is a possible definition of memory
  // that 'load' needs to use.  We need to force 'load'
  // to occur before each such store.  When the store is in
  // the same block as 'load', we insert an anti-dependence
  // edge load->store.

  // The relevant stores "nearby" the load consist of a tree rooted
  // at initial_mem, with internal nodes of type MergeMem.
  // Therefore, the branches visited by the worklist are of this form:
  //    initial_mem -> (MergeMem ->)* store
  // The anti-dependence constraints apply only to the fringe of this tree.
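  //
  // A small picture of that search space (shape is illustrative only):
  //
  //   initial_mem
  //    +-- StoreI            <- fringe: candidate anti-dependence
  //    +-- MergeMem          <- internal node: recursed through, never a def
  //          +-- StoreB      <- fringe
  //          +-- Phi(mem)    <- fringe: handled per-predecessor below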

  Node* initial_mem = load->in(MemNode::Memory);
  worklist_store.push(initial_mem);
  worklist_visited.push(initial_mem);
  worklist_mem.push(NULL);
  while (worklist_store.size() > 0) {
    // Examine a nearby store to see if it might interfere with our load.
    Node* mem   = worklist_mem.pop();
    Node* store = worklist_store.pop();
    uint op = store->Opcode();

    // MergeMems do not directly have anti-deps.
    // Treat them as internal nodes in a forward tree of memory states,
    // the leaves of which are each a 'possible-def'.
    if (store == initial_mem    // root (exclusive) of tree we are searching
        || op == Op_MergeMem    // internal node of tree we are searching
        ) {
      mem = store;   // It's not a possibly interfering store.
      if (store == initial_mem)
        initial_mem = NULL;  // only process initial memory once

      for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
        store = mem->fast_out(i);
        if (store->is_MergeMem()) {
          // Be sure we don't get into combinatorial problems.
          // (Allow phis to be repeated; they can merge two relevant states.)
          uint j = worklist_visited.size();
          for (; j > 0; j--) {
            if (worklist_visited.at(j-1) == store)  break;
          }
          if (j > 0)  continue; // already on work list; do not repeat
          worklist_visited.push(store);
        }
        worklist_mem.push(mem);
        worklist_store.push(store);
      }
      continue;
    }

    if (op == Op_MachProj || op == Op_Catch)   continue;
    if (store->needs_anti_dependence_check())  continue;  // not really a store

    // Compute the alias index.  Loads and stores with different alias
    // indices do not need anti-dependence edges.  Wide MemBar's are
    // anti-dependent on everything (except immutable memories).
    const TypePtr* adr_type = store->adr_type();
    if (!C->can_alias(adr_type, load_alias_idx))  continue;

    // Most slow-path runtime calls do NOT modify Java memory, but
    // they can block and so write Raw memory.
    if (store->is_Mach()) {
      MachNode* mstore = store->as_Mach();
      if (load_alias_idx != Compile::AliasIdxRaw) {
        // Check for call into the runtime using the Java calling
        // convention (and from there into a wrapper); it has no
        // _method.  Can't do this optimization for Native calls because
        // they CAN write to Java memory.
        if (mstore->ideal_Opcode() == Op_CallStaticJava) {
          assert(mstore->is_MachSafePoint(), "");
          MachSafePointNode* ms = (MachSafePointNode*) mstore;
          assert(ms->is_MachCallJava(), "");
          MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
          if (mcj->_method == NULL) {
            // These runtime calls do not write to Java visible memory
            // (other than Raw) and so do not require anti-dependence edges.
            continue;
          }
        }
        // Same for SafePoints: they read/write Raw but only read otherwise.
        // This is basically a workaround for SafePoints only defining control
        // instead of control + memory.
        if (mstore->ideal_Opcode() == Op_SafePoint)
          continue;
      } else {
        // Some raw memory, such as the load of "top" at an allocation,
        // can be control dependent on the previous safepoint. See
        // comments in GraphKit::allocate_heap() about control input.
        // Inserting an anti-dep between such a safepoint and a use
        // creates a cycle, and will cause a subsequent failure in
        // local scheduling.  (BugId 4919904)
        // (%%% How can a control input be a safepoint and not a projection??)
        if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore)
          continue;
      }
    }

    // Identify a block that the current load must be above,
    // or else observe that 'store' is all the way up in the
    // earliest legal block for 'load'.  In the latter case,
    // immediately insert an anti-dependence edge.
    Block* store_block = _bbs[store->_idx];
    assert(store_block != NULL, "unused killing projections skipped above");

    if (store->is_Phi()) {
      // 'load' uses memory which is one (or more) of the Phi's inputs.
      // It must be scheduled not before the Phi, but rather before
      // each of the relevant Phi inputs.
      //
      // Instead of finding the LCA of all inputs to a Phi that match 'mem',
      // we mark each corresponding predecessor block and do a combined
      // hoisting operation later (raise_LCA_above_marks).
      //
      // Do not assert(store_block != early, "Phi merging memory after access")
      // PhiNode may be at start of block 'early' with backedge to 'early'
      DEBUG_ONLY(bool found_match = false);
      for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
        if (store->in(j) == mem) {   // Found matching input?
          DEBUG_ONLY(found_match = true);
          Block* pred_block = _bbs[store_block->pred(j)->_idx];
          if (pred_block != early) {
            // If any predecessor of the Phi matches the load's "early block",
            // we do not need a precedence edge between the Phi and 'load'
            // since the load will be forced into a block preceding the Phi.
            pred_block->set_raise_LCA_mark(load_index);
            assert(!LCA_orig->dominates(pred_block) ||
                   early->dominates(pred_block), "early is high enough");
            must_raise_LCA = true;
          } else {
            // anti-dependent upon PHI pinned below 'early', no edge needed
            LCA = early;             // but can not schedule below 'early'
          }
        }
      }
      assert(found_match, "no worklist bug");
#ifdef TRACK_PHI_INPUTS
#ifdef ASSERT
      // This assert asks about correct handling of PhiNodes, which may not
      // have all input edges directly from 'mem'. See BugId 4621264
      int num_mem_inputs = phi_inputs.at_grow(store->_idx,0) + 1;
      // Increment by exactly one even if there are multiple copies of 'mem'
      // coming into the phi, because we will run this block several times
      // if there are several copies of 'mem'.  (That's how DU iterators work.)
      phi_inputs.at_put(store->_idx, num_mem_inputs);
      assert(PhiNode::Input + num_mem_inputs < store->req(),
             "Expect at least one phi input will not be from original memory state");
#endif //ASSERT
#endif //TRACK_PHI_INPUTS
    } else if (store_block != early) {
      // 'store' is between the current LCA and earliest possible block.
      // Label its block, and decide later on how to raise the LCA
      // to include the effect on LCA of this store.
      // If this store's block gets chosen as the raised LCA, we
      // will find him on the non_early_stores list and stick him
      // with a precedence edge.
      // (But, don't bother if LCA is already raised all the way.)
      if (LCA != early) {
        store_block->set_raise_LCA_mark(load_index);
        must_raise_LCA = true;
        non_early_stores.push(store);
      }
    } else {
      // Found a possibly-interfering store in the load's 'early' block.
      // This means 'load' cannot sink at all in the dominator tree.
      // Add an anti-dep edge, and squeeze 'load' into the highest block.
      assert(store != load->in(0), "dependence cycle found");
      if (verify) {
        assert(store->find_edge(load) != -1, "missing precedence edge");
      } else {
        store->add_prec(load);
      }
      LCA = early;
      // This turns off the process of gathering non_early_stores.
    }
  }
  // (Worklist is now empty; all nearby stores have been visited.)

  // Finished if 'load' must be scheduled in its 'early' block.
  // If we found any stores there, they have already been given
  // precedence edges.
  if (LCA == early)  return LCA;

  // We get here only if there are no possibly-interfering stores
  // in the load's 'early' block.  Move LCA up above all predecessors
  // which contain stores we have noted.
  //
  // The raised LCA block can be a home to such interfering stores,
  // but its predecessors must not contain any such stores.
  //
  // The raised LCA will be a lower bound for placing the load,
  // preventing the load from sinking past any block containing
  // a store that may invalidate the memory state required by 'load'.
  if (must_raise_LCA)
    LCA = raise_LCA_above_marks(LCA, load->_idx, early, _bbs);
  if (LCA == early)  return LCA;

  // Insert anti-dependence edges from 'load' to each store
  // in the non-early LCA block.
  // Mine the non_early_stores list for such stores.
  if (LCA->raise_LCA_mark() == load_index) {
    while (non_early_stores.size() > 0) {
      Node* store = non_early_stores.pop();
      Block* store_block = _bbs[store->_idx];
      if (store_block == LCA) {
        // add anti_dependence from store to load in its own block
        assert(store != load->in(0), "dependence cycle found");
        if (verify) {
          assert(store->find_edge(load) != -1, "missing precedence edge");
        } else {
          store->add_prec(load);
        }
      } else {
        assert(store_block->raise_LCA_mark() == load_index, "block was marked");
        // Any other stores we found must be either inside the new LCA
        // or else outside the original LCA.  In the latter case, they
        // did not interfere with any use of 'load'.
        assert(LCA->dominates(store_block)
               || !LCA_orig->dominates(store_block), "no stray stores");
      }
    }
  }

  // Return the highest block containing stores; any stores
  // within that block have been given anti-dependence edges.
  return LCA;
}

// This class is used to iterate backwards over the nodes in the graph.

class Node_Backward_Iterator {

private:
  Node_Backward_Iterator();

public:
  // Constructor for the iterator
  Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, Block_Array &bbs);

  // Return the next node in the backwards traversal, or NULL when done
  Node *next();

private:
  VectorSet   &_visited;
  Node_List   &_stack;
  Block_Array &_bbs;
};

// Constructor for the Node_Backward_Iterator
Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, Block_Array &bbs )
  : _visited(visited), _stack(stack), _bbs(bbs) {
  // The stack should contain exactly the root
  stack.clear();
  stack.push(root);

  // Clear the visited bits
  visited.Clear();
}

// Iterator for the Node_Backward_Iterator
Node *Node_Backward_Iterator::next() {

  // If the _stack is empty, then just return NULL: finished.
  if ( !_stack.size() )
    return NULL;

  // '_stack' is emulating a real _stack.  The 'visit-all-users' loop has been
  // made stateless, so I do not need to record the index 'i' on my _stack.
  // Instead I visit all users each time, scanning for unvisited users.
  // I visit unvisited not-anti-dependence users first, then anti-dependent
  // children next.
  Node *self = _stack.pop();

  // I cycle here when I am entering a deeper level of recursion.
  // The key variable 'self' was set prior to jumping here.
  while( 1 ) {

    _visited.set(self->_idx);

    // Now schedule all uses as late as possible.
    uint src     = self->is_Proj() ? self->in(0)->_idx : self->_idx;
    uint src_rpo = _bbs[src]->_rpo;

    // Schedule all nodes in a post-order visit
    Node *unvisited = NULL;  // Unvisited anti-dependent Node, if any

    // Scan for unvisited nodes
    for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
      // For all uses, schedule late
      Node* n = self->fast_out(i); // Use

      // Skip already visited children
      if ( _visited.test(n->_idx) )
        continue;

      // do not traverse backward control edges
      Node *use = n->is_Proj() ? n->in(0) : n;
      uint use_rpo = _bbs[use->_idx]->_rpo;

      if ( use_rpo < src_rpo )
        continue;

      // Phi nodes always precede uses in a basic block
      if ( use_rpo == src_rpo && use->is_Phi() )
        continue;

      unvisited = n;      // Found unvisited

      // Check for possible-anti-dependent
      if( !n->needs_anti_dependence_check() )
        break;            // Not visited, not anti-dep; schedule it NOW
    }

    // Did I find an unvisited not-anti-dependent Node?
    if ( !unvisited )
      break;                  // All done with children; post-visit 'self'

    // Visit the unvisited Node.  Contains the obvious push to
    // indicate I'm entering a deeper level of recursion.  I push the
    // old state onto the _stack and set a new state and loop (recurse).
    _stack.push(self);
    self = unvisited;
  } // End recursion loop

  return self;
}
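
// Net effect: next() yields nodes in a post-order over the def->use edges,
// so every use is returned before its def, which is exactly the order the
// backwards latency and late-scheduling walks below require.  The iterator
// trades a per-node input index on the stack for rescanning each node's
// out-edges on every visit; the scan prefers a not-anti-dependent
// unvisited use and falls back to an anti-dependent one only when nothing
// else remains.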

//------------------------------ComputeLatenciesBackwards----------------------
// Compute the latency of all the instructions.
void PhaseCFG::ComputeLatenciesBackwards(VectorSet &visited, Node_List &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- ComputeLatenciesBackwards ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs);
  Node *n;

  // Walk over all the nodes from last to first
  while ((n = iter.next()) != NULL) {
    // Set the latency for the definitions of this instruction
    partial_latency_of_defs(n);
  }
} // end ComputeLatenciesBackwards

//------------------------------partial_latency_of_defs------------------------
// Compute the latency impact of this node on all defs.  This computes
// a number that increases as we approach the beginning of the routine.
void PhaseCFG::partial_latency_of_defs(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_to_inputs: node_latency[%d] = %d for node",
               n->_idx, _node_latency->at_grow(n->_idx));
    dump();
  }
#endif

  if (n->is_Proj())
    n = n->in(0);

  if (n->is_Root())
    return;

  uint nlen = n->len();
  uint use_latency = _node_latency->at_grow(n->_idx);
  uint use_pre_order = _bbs[n->_idx]->_pre_order;

  for ( uint j=0; j<nlen; j++ ) {
    Node *def = n->in(j);

    if (!def || def == n)
      continue;

    // Walk backwards thru projections
    if (def->is_Proj())
      def = def->in(0);

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("#    in(%2d): ", j);
      def->dump();
    }
#endif

    // If the defining block is not known, assume it is ok
    Block *def_block = _bbs[def->_idx];
    uint def_pre_order = def_block ? def_block->_pre_order : 0;

    if ( (use_pre_order <  def_pre_order) ||
         (use_pre_order == def_pre_order && n->is_Phi()) )
      continue;

    uint delta_latency = n->latency(j);
    uint current_latency = delta_latency + use_latency;

    if (_node_latency->at_grow(def->_idx) < current_latency) {
      _node_latency->at_put_grow(def->_idx, current_latency);
    }

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d",
                    use_latency, j, delta_latency, current_latency, def->_idx,
                    _node_latency->at_grow(def->_idx));
    }
#endif
  }
}
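
// Worked example of the update rule above (numbers are illustrative): if a
// use n has node_latency[n] == 5 and the edge cost n->latency(j) is 2, its
// def must be ready 7 "cycles" before the end of the routine, so
// node_latency[def] is raised to max(node_latency[def], 7).  Running this
// over the backward iterator leaves every node holding the length of its
// longest latency path to any final use.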

//------------------------------latency_from_use-------------------------------
// Compute the latency of a specific use
int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
  // If self-reference, return no latency
  if (use == n || use->is_Root())
    return 0;

  uint def_pre_order = _bbs[def->_idx]->_pre_order;
  uint latency = 0;

  // If the use is not a projection, then it is simple...
  if (!use->is_Proj()) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("#    out(): ");
      use->dump();
    }
#endif

    uint use_pre_order = _bbs[use->_idx]->_pre_order;

    if (use_pre_order < def_pre_order)
      return 0;

    if (use_pre_order == def_pre_order && use->is_Phi())
      return 0;

    uint nlen = use->len();
    uint nl = _node_latency->at_grow(use->_idx);

    for ( uint j=0; j<nlen; j++ ) {
      if (use->in(j) == n) {
        // Change this if we want local latencies
        uint ul = use->latency(j);
        uint  l = ul + nl;
        if (latency < l) latency = l;
#ifndef PRODUCT
        if (trace_opto_pipelining()) {
          tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, latency = %d",
                        nl, j, ul, l, latency);
        }
#endif
      }
    }
  } else {
    // This is a projection, just grab the latency of the use(s)
    for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
      uint l = latency_from_use(use, def, use->fast_out(j));
      if (latency < l) latency = l;
    }
  }

  return latency;
}

//------------------------------latency_from_uses------------------------------
// Compute the latency of this instruction relative to all of its uses.
// This computes a number that increases as we approach the beginning of the
// routine.
void PhaseCFG::latency_from_uses(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_from_outputs: node_latency[%d] = %d for node",
               n->_idx, _node_latency->at_grow(n->_idx));
    dump();
  }
#endif
  uint latency=0;
  const Node *def = n->is_Proj() ? n->in(0): n;

  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    uint l = latency_from_use(n, def, n->fast_out(i));

    if (latency < l) latency = l;
  }

  _node_latency->at_put_grow(n->_idx, latency);
}

//------------------------------hoist_to_cheaper_block-------------------------
// Pick a block for node self, between early and LCA, that is a cheaper
// alternative to LCA.
Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
  const double delta = 1+PROB_UNLIKELY_MAG(4);
  Block* least       = LCA;
  double least_freq  = least->_freq;
  uint target        = _node_latency->at_grow(self->_idx);
  uint start_latency = _node_latency->at_grow(LCA->_nodes[0]->_idx);
  uint end_latency   = _node_latency->at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
  bool in_latency    = (target <= start_latency);
  const Block* root_block = _bbs[_root->_idx];

  // Turn off latency scheduling if scheduling is just plain off
  if (!C->do_scheduling())
    in_latency = true;

  // Do not hoist (to cover latency) instructions which target a
  // single register.  Hoisting stretches the live range of the
  // single register and may force spilling.
  MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
  if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty())
    in_latency = true;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# Find cheaper block for latency %d: ",
      _node_latency->at_grow(self->_idx));
    self->dump();
    tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
      LCA->_pre_order,
      LCA->_nodes[0]->_idx,
      start_latency,
      LCA->_nodes[LCA->end_idx()]->_idx,
      end_latency,
      least_freq);
  }
#endif

  int cand_cnt = 0;  // number of candidates tried

  // Walk up the dominator tree from LCA (Lowest common ancestor) to
  // the earliest legal location.  Capture the least execution frequency.
  while (LCA != early) {
    LCA = LCA->_idom;         // Follow up the dominator tree

    if (LCA == NULL) {
      // Bailout without retry
      C->record_method_not_compilable("late schedule failed: LCA == NULL");
      return least;
    }

    // Don't hoist machine instructions to the root basic block
    if (mach && LCA == root_block)
      break;

    uint start_lat = _node_latency->at_grow(LCA->_nodes[0]->_idx);
    uint end_idx   = LCA->end_idx();
    uint end_lat   = _node_latency->at_grow(LCA->_nodes[end_idx]->_idx);
    double LCA_freq = LCA->_freq;
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
        LCA->_pre_order, LCA->_nodes[0]->_idx, start_lat, end_idx, end_lat, LCA_freq);
    }
#endif
    cand_cnt++;
    if (LCA_freq < least_freq              || // Better Frequency
        (StressGCM && Compile::randomized_select(cand_cnt)) || // Should be randomly accepted in stress mode
         (!StressGCM                    &&    // Otherwise, choose with latency
          !in_latency                   &&    // No block containing latency
          LCA_freq < least_freq * delta &&    // No worse frequency
          target >= end_lat             &&    // within latency range
          !self->is_iteratively_computed() )  // But don't hoist IV increments
             // because they may end up above other uses of their phi forcing
             // their result register to be different from their input.
       ) {
      least = LCA;            // Found cheaper block
      least_freq = LCA_freq;
      start_latency = start_lat;
      end_latency = end_lat;
      if (target <= start_lat)
        in_latency = true;
    }
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print_cr("#  Choose block B%d with start latency=%d and freq=%g",
      least->_pre_order, start_latency, least_freq);
  }
#endif

  // See if the latency needs to be updated
  if (target < end_latency) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("#  Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
    }
#endif
    _node_latency->at_put_grow(self->_idx, end_latency);
    partial_latency_of_defs(self);
  }

  return least;
}
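
// On the acceptance test in the loop above: delta is
// 1 + PROB_UNLIKELY_MAG(4), i.e. only a hair above 1.0, so on the latency
// path a candidate block is taken only when its frequency is essentially
// no worse than the best seen so far (LCA_freq < least_freq * delta), its
// end latency still covers the node's target latency, and the node is not
// an iteratively computed value whose live range must not be stretched.
// A strictly lower frequency, by contrast, always wins outright.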


//------------------------------schedule_late-----------------------------------
// Now schedule all codes as LATE as possible.  This is the LCA in the
// dominator tree of all USES of a value.  Pick the block with the least
// loop nesting depth that is lowest in the dominator tree.
extern const char must_clone[];
void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- schedule_late ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs);
  Node *self;

  // Walk over all the nodes from last to first
  while ((self = iter.next()) != NULL) {
    Block* early = _bbs[self->_idx];   // Earliest legal placement

    if (self->is_top()) {
      // Top node goes in bb #2 with other constants.
      // It must be special-cased, because it has no out edges.
      early->add_inst(self);
      continue;
    }

    // No uses, just terminate
    if (self->outcnt() == 0) {
      assert(self->is_MachProj(), "sanity");
      continue;                   // Must be a dead machine projection
    }

    // If node is pinned in the block, then no scheduling can be done.
    if( self->pinned() )          // Pinned in block?
      continue;

    MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
    if (mach) {
      switch (mach->ideal_Opcode()) {
      case Op_CreateEx:
        // Don't move exception creation
        early->add_inst(self);
        continue;
        break;
      case Op_CheckCastPP:
        // Don't move CheckCastPP nodes away from their input, if the input
        // is a rawptr (5071820).
        Node *def = self->in(1);
        if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
          early->add_inst(self);
#ifdef ASSERT
          _raw_oops.push(def);
#endif
          continue;
        }
        break;
      }
    }

    // Gather LCA of all uses
    Block *LCA = NULL;
    {
      for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
        // For all uses, find LCA
        Node* use = self->fast_out(i);
        LCA = raise_LCA_above_use(LCA, use, self, _bbs);
      }
    }  // (Hide defs of imax, i from rest of block.)

    // Place temps in the block of their use.  This isn't a
    // requirement for correctness but it reduces useless
    // interference between temps and other nodes.
    if (mach != NULL && mach->is_MachTemp()) {
      _bbs.map(self->_idx, LCA);
      LCA->add_inst(self);
      continue;
    }

    // Check if 'self' could be anti-dependent on memory
    if (self->needs_anti_dependence_check()) {
      // Hoist LCA above possible-defs and insert anti-dependences to
      // defs in new LCA block.
      LCA = insert_anti_dependences(LCA, self);
    }

    if (early->_dom_depth > LCA->_dom_depth) {
      // Somehow the LCA has moved above the earliest legal point.
      // (One way this can happen is via memory_early_block.)
      if (C->subsume_loads() == true && !C->failing()) {
        // Retry with subsume_loads == false
        // If this is the first failure, the sentinel string will "stick"
        // to the Compile object, and the C2Compiler will see it and retry.
        C->record_failure(C2Compiler::retry_no_subsuming_loads());
      } else {
        // Bailout without retry when (early->_dom_depth > LCA->_dom_depth)
        C->record_method_not_compilable("late schedule failed: incorrect graph");
      }
      return;
    }

    // If there is no opportunity to hoist, then we're done.
    // In stress mode, try to hoist even the single operations.
    bool try_to_hoist = StressGCM || (LCA != early);

    // Must clone guys stay next to use; no hoisting allowed.
    // Also cannot hoist guys that alter memory or are otherwise not
    // allocatable (hoisting can make a value live longer, leading to
    // anti and output dependency problems which are normally resolved
    // by the register allocator giving everyone a different register).
    if (mach != NULL && must_clone[mach->ideal_Opcode()])
      try_to_hoist = false;

    Block* late = NULL;
    if (try_to_hoist) {
      // Now find the block with the least execution frequency.
      // Start at the latest schedule and work up to the earliest schedule
      // in the dominator tree.  Thus the Node will dominate all its uses.
      late = hoist_to_cheaper_block(LCA, early, self);
    } else {
      // Just use the LCA of the uses.
      late = LCA;
    }

    // Put the node into target block
    schedule_node_into_block(self, late);

#ifdef ASSERT
    if (self->needs_anti_dependence_check()) {
      // since precedence edges are only inserted when we're sure they
      // are needed make sure that after placement in a block we don't
      // need any new precedence edges.
      verify_anti_dependences(late, self);
    }
#endif
  } // Loop until all nodes have been visited

} // end schedule_late
1254 
1255 //------------------------------GlobalCodeMotion-------------------------------
1256 void PhaseCFG::GlobalCodeMotion( Matcher &matcher, uint unique, Node_List &proj_list ) {
1257   ResourceMark rm;
1258 
1259 #ifndef PRODUCT
1260   if (trace_opto_pipelining()) {
1261     tty->print("\n---- Start GlobalCodeMotion ----\n");
1262   }
1263 #endif
1264 
1265   // Initialize the bbs.map for things on the proj_list
1266   uint i;
1267   for( i=0; i < proj_list.size(); i++ )
1268     _bbs.map(proj_list[i]->_idx, NULL);
1269 
1270   // Set the basic block for Nodes pinned into blocks
1271   Arena *a = Thread::current()->resource_area();
1272   VectorSet visited(a);
1273   schedule_pinned_nodes( visited );
1274 
1275   // Find the earliest Block any instruction can be placed in.  Some
1276   // instructions are pinned into Blocks.  Unpinned instructions can
1277   // appear in last block in which all their inputs occur.
1278   visited.Clear();
1279   Node_List stack(a);
1280   stack.map( (unique >> 1) + 16, NULL); // Pre-grow the list
1281   if (!schedule_early(visited, stack)) {
1282     // Bailout without retry
1283     C->record_method_not_compilable("early schedule failed");
1284     return;
1285   }
1286 
1287   // Build Def-Use edges.
1288   proj_list.push(_root);        // Add real root as another root
1289   proj_list.pop();
1290 
1291   // Compute the latency information (via backwards walk) for all the
1292   // instructions in the graph
1293   _node_latency = new GrowableArray<uint>(); // resource_area allocation
1294 
1295   if( C->do_scheduling() )
1296     ComputeLatenciesBackwards(visited, stack);
1297 
  // Now schedule all nodes as LATE as possible.  This is the LCA in the
1299   // dominator tree of all USES of a value.  Pick the block with the least
1300   // loop nesting depth that is lowest in the dominator tree.
1301   // ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() )
1302   schedule_late(visited, stack);
1303   if( C->failing() ) {
1304     // schedule_late fails only when graph is incorrect.
1305     assert(!VerifyGraphEdges, "verification should have failed");
1306     return;
1307   }
1308 
1309   unique = C->unique();
1310 
1311 #ifndef PRODUCT
1312   if (trace_opto_pipelining()) {
1313     tty->print("\n---- Detect implicit null checks ----\n");
1314   }
1315 #endif
1316 
1317   // Detect implicit-null-check opportunities.  Basically, find NULL checks
1318   // with suitable memory ops nearby.  Use the memory op to do the NULL check.
1319   // I can generate a memory op if there is not one nearby.
1320   if (C->is_method_compilation()) {
1321     // Don't do it for natives, adapters, or runtime stubs
1322     int allowed_reasons = 0;
1323     // ...and don't do it when there have been too many traps, globally.
1324     for (int reason = (int)Deoptimization::Reason_none+1;
1325          reason < Compile::trapHistLength; reason++) {
1326       assert(reason < BitsPerInt, "recode bit map");
1327       if (!C->too_many_traps((Deoptimization::DeoptReason) reason))
1328         allowed_reasons |= nth_bit(reason);
1329     }
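    // Example (illustrative): if only Reason_null_check is still under its
    // trap limit, allowed_reasons ends up as nth_bit(Reason_null_check),
    // a mask with just that one reason bit set.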
1330     // By reversing the loop direction we get a very minor gain on mpegaudio.
1331     // Feel free to revert to a forward loop for clarity.
1332     // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
1333     for( int i= matcher._null_check_tests.size()-2; i>=0; i-=2 ) {
1334       Node *proj = matcher._null_check_tests[i  ];
1335       Node *val  = matcher._null_check_tests[i+1];
1336       _bbs[proj->_idx]->implicit_null_check(this, proj, val, allowed_reasons);
1337       // The implicit_null_check will only perform the transformation
1338       // if the null branch is truly uncommon, *and* it leads to an
1339       // uncommon trap.  Combined with the too_many_traps guards
1340       // above, this prevents SEGV storms reported in 6366351,
1341       // by recompiling offending methods without this optimization.
1342     }
1343   }
1344 
1345 #ifndef PRODUCT
1346   if (trace_opto_pipelining()) {
1347     tty->print("\n---- Start Local Scheduling ----\n");
1348   }
1349 #endif
1350 
  // Schedule locally.  Right now a simple topological sort.
  // Later, do a real latency-aware scheduler.
1353   uint max_idx = C->unique();
1354   GrowableArray<int> ready_cnt(max_idx, max_idx, -1);
1355   visited.Clear();
1356   for (i = 0; i < _num_blocks; i++) {
1357     if (!_blocks[i]->schedule_local(this, matcher, ready_cnt, visited)) {
1358       if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
1359         C->record_method_not_compilable("local schedule failed");
1360       }
1361       return;
1362     }
1363   }
1364 
  // If we inserted any instructions between a Call and its CatchNode,
1366   // clone the instructions on all paths below the Catch.
1367   for( i=0; i < _num_blocks; i++ )
1368     _blocks[i]->call_catch_cleanup(_bbs, C);
1369 
1370 #ifndef PRODUCT
1371   if (trace_opto_pipelining()) {
1372     tty->print("\n---- After GlobalCodeMotion ----\n");
1373     for (uint i = 0; i < _num_blocks; i++) {
1374       _blocks[i]->dump();
1375     }
1376   }
1377 #endif
  // _node_latency is dead past this point; poison the pointer to catch stale uses.
1379   _node_latency = (GrowableArray<uint> *)0xdeadbeef;
1380 }
1381 
1382 
1383 //------------------------------Estimate_Block_Frequency-----------------------
1384 // Estimate block frequencies based on IfNode probabilities.
1385 void PhaseCFG::Estimate_Block_Frequency() {
1386 
1387   // Force conditional branches leading to uncommon traps to be unlikely,
1388   // not because we get to the uncommon_trap with less relative frequency,
1389   // but because an uncommon_trap typically causes a deopt, so we only get
1390   // there once.
1391   if (C->do_freq_based_layout()) {
1392     Block_List worklist;
1393     Block* root_blk = _blocks[0];
1394     for (uint i = 1; i < root_blk->num_preds(); i++) {
1395       Block *pb = _bbs[root_blk->pred(i)->_idx];
1396       if (pb->has_uncommon_code()) {
1397         worklist.push(pb);
1398       }
1399     }
1400     while (worklist.size() > 0) {
1401       Block* uct = worklist.pop();
1402       if (uct == _broot) continue;
1403       for (uint i = 1; i < uct->num_preds(); i++) {
1404         Block *pb = _bbs[uct->pred(i)->_idx];
1405         if (pb->_num_succs == 1) {
1406           worklist.push(pb);
1407         } else if (pb->num_fall_throughs() == 2) {
1408           pb->update_uncommon_branch(uct);
1409         }
1410       }
1411     }
1412   }
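  // Example (illustrative) of the walk above: given
  //   B1 --(two-way if)--> B2 --> B3 --> uncommon trap
  // B3 and B2 each have a single successor, so the walk climbs through
  // them, and update_uncommon_branch() then lowers the probability of the
  // B1 branch that leads toward the trap.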
1413 
1414   // Create the loop tree and calculate loop depth.
1415   _root_loop = create_loop_tree();
1416   _root_loop->compute_loop_depth(0);
1417 
1418   // Compute block frequency of each block, relative to a single loop entry.
1419   _root_loop->compute_freq();
1420 
1421   // Adjust all frequencies to be relative to a single method entry
1422   _root_loop->_freq = 1.0;
1423   _root_loop->scale_freq();
1424 
  // Save outermost loop frequency for LRG frequency threshold
1426   _outer_loop_freq = _root_loop->outer_loop_freq();
1427 
  // Force paths ending at uncommon traps to be infrequent
1429   if (!C->do_freq_based_layout()) {
1430     Block_List worklist;
1431     Block* root_blk = _blocks[0];
1432     for (uint i = 1; i < root_blk->num_preds(); i++) {
1433       Block *pb = _bbs[root_blk->pred(i)->_idx];
1434       if (pb->has_uncommon_code()) {
1435         worklist.push(pb);
1436       }
1437     }
1438     while (worklist.size() > 0) {
1439       Block* uct = worklist.pop();
1440       uct->_freq = PROB_MIN;
1441       for (uint i = 1; i < uct->num_preds(); i++) {
1442         Block *pb = _bbs[uct->pred(i)->_idx];
1443         if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
1444           worklist.push(pb);
1445         }
1446       }
1447     }
1448   }
1449 
1450 #ifdef ASSERT
1451   for (uint i = 0; i < _num_blocks; i++ ) {
1452     Block *b = _blocks[i];
1453     assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency");
1454   }
1455 #endif
1456 
1457 #ifndef PRODUCT
1458   if (PrintCFGBlockFreq) {
1459     tty->print_cr("CFG Block Frequencies");
1460     _root_loop->dump_tree();
1461     if (Verbose) {
1462       tty->print_cr("PhaseCFG dump");
1463       dump();
1464       tty->print_cr("Node dump");
1465       _root->dump(99999);
1466     }
1467   }
1468 #endif
1469 }
1470 
1471 //----------------------------create_loop_tree--------------------------------
1472 // Create a loop tree from the CFG
1473 CFGLoop* PhaseCFG::create_loop_tree() {
1474 
1475 #ifdef ASSERT
1476   assert( _blocks[0] == _broot, "" );
1477   for (uint i = 0; i < _num_blocks; i++ ) {
1478     Block *b = _blocks[i];
    // Check that _loop fields are clear...we could clear them if not.
    assert(b->_loop == NULL, "clear _loop expected");
    // Sanity check that the RPO numbering is reflected in the _blocks array.
    // It doesn't have to be for the loop tree to be built, but if it is not,
    // then the blocks have been reordered since dom graph building...which
    // calls the RPO numbering into question.
1485     assert(b->_rpo == i, "unexpected reverse post order number");
1486   }
1487 #endif
1488 
1489   int idct = 0;
1490   CFGLoop* root_loop = new CFGLoop(idct++);
1491 
1492   Block_List worklist;
1493 
1494   // Assign blocks to loops
1495   for(uint i = _num_blocks - 1; i > 0; i-- ) { // skip Root block
1496     Block *b = _blocks[i];
1497 
1498     if (b->head()->is_Loop()) {
1499       Block* loop_head = b;
1500       assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
1501       Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
1502       Block* tail = _bbs[tail_n->_idx];
1503 
1504       // Defensively filter out Loop nodes for non-single-entry loops.
1505       // For all reasonable loops, the head occurs before the tail in RPO.
1506       if (i <= tail->_rpo) {
1507 
1508         // The tail and (recursive) predecessors of the tail
1509         // are made members of a new loop.
1510 
1511         assert(worklist.size() == 0, "nonempty worklist");
1512         CFGLoop* nloop = new CFGLoop(idct++);
1513         assert(loop_head->_loop == NULL, "just checking");
1514         loop_head->_loop = nloop;
1515         // Add to nloop so push_pred() will skip over inner loops
1516         nloop->add_member(loop_head);
1517         nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, _bbs);
1518 
1519         while (worklist.size() > 0) {
1520           Block* member = worklist.pop();
1521           if (member != loop_head) {
1522             for (uint j = 1; j < member->num_preds(); j++) {
1523               nloop->push_pred(member, j, worklist, _bbs);
1524             }
1525           }
1526         }
1527       }
1528     }
1529   }
1530 
1531   // Create a member list for each loop consisting
1532   // of both blocks and (immediate child) loops.
1533   for (uint i = 0; i < _num_blocks; i++) {
1534     Block *b = _blocks[i];
1535     CFGLoop* lp = b->_loop;
1536     if (lp == NULL) {
1537       // Not assigned to a loop. Add it to the method's pseudo loop.
1538       b->_loop = root_loop;
1539       lp = root_loop;
1540     }
1541     if (lp == root_loop || b != lp->head()) { // loop heads are already members
1542       lp->add_member(b);
1543     }
1544     if (lp != root_loop) {
1545       if (lp->parent() == NULL) {
1546         // Not a nested loop. Make it a child of the method's pseudo loop.
1547         root_loop->add_nested_loop(lp);
1548       }
1549       if (b == lp->head()) {
1550         // Add nested loop to member list of parent loop.
1551         lp->parent()->add_member(lp);
1552       }
1553     }
1554   }
1555 
1556   return root_loop;
1557 }
1558 
1559 //------------------------------push_pred--------------------------------------
1560 void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk) {
1561   Node* pred_n = blk->pred(i);
1562   Block* pred = node_to_blk[pred_n->_idx];
1563   CFGLoop *pred_loop = pred->_loop;
1564   if (pred_loop == NULL) {
1565     // Filter out blocks for non-single-entry loops.
1566     // For all reasonable loops, the head occurs before the tail in RPO.
1567     if (pred->_rpo > head()->_rpo) {
1568       pred->_loop = this;
1569       worklist.push(pred);
1570     }
1571   } else if (pred_loop != this) {
1572     // Nested loop.
1573     while (pred_loop->_parent != NULL && pred_loop->_parent != this) {
1574       pred_loop = pred_loop->_parent;
1575     }
1576     // Make pred's loop be a child
1577     if (pred_loop->_parent == NULL) {
1578       add_nested_loop(pred_loop);
1579       // Continue with loop entry predecessor.
1580       Block* pred_head = pred_loop->head();
1581       assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
1582       assert(pred_head != head(), "loop head in only one loop");
1583       push_pred(pred_head, LoopNode::EntryControl, worklist, node_to_blk);
1584     } else {
1585       assert(pred_loop->_parent == this && _parent == NULL, "just checking");
1586     }
1587   }
1588 }
1589 
1590 //------------------------------add_nested_loop--------------------------------
1591 // Make cl a child of the current loop in the loop tree.
1592 void CFGLoop::add_nested_loop(CFGLoop* cl) {
1593   assert(_parent == NULL, "no parent yet");
1594   assert(cl != this, "not my own parent");
1595   cl->_parent = this;
1596   CFGLoop* ch = _child;
1597   if (ch == NULL) {
1598     _child = cl;
1599   } else {
1600     while (ch->_sibling != NULL) { ch = ch->_sibling; }
1601     ch->_sibling = cl;
1602   }
1603 }
1604 
1605 //------------------------------compute_loop_depth-----------------------------
1606 // Store the loop depth in each CFGLoop object.
1607 // Recursively walk the children to do the same for them.
1608 void CFGLoop::compute_loop_depth(int depth) {
1609   _depth = depth;
1610   CFGLoop* ch = _child;
1611   while (ch != NULL) {
1612     ch->compute_loop_depth(depth + 1);
1613     ch = ch->_sibling;
1614   }
1615 }
1616 
1617 //------------------------------compute_freq-----------------------------------
1618 // Compute the frequency of each block and loop, relative to a single entry
1619 // into the dominating loop head.
1620 void CFGLoop::compute_freq() {
1621   // Bottom up traversal of loop tree (visit inner loops first.)
1622   // Set loop head frequency to 1.0, then transitively
1623   // compute frequency for all successors in the loop,
1624   // as well as for each exit edge.  Inner loops are
1625   // treated as single blocks with loop exit targets
1626   // as the successor blocks.
1627 
1628   // Nested loops first
1629   CFGLoop* ch = _child;
1630   while (ch != NULL) {
1631     ch->compute_freq();
1632     ch = ch->_sibling;
1633   }
1634   assert (_members.length() > 0, "no empty loops");
1635   Block* hd = head();
1636   hd->_freq = 1.0f;
1637   for (int i = 0; i < _members.length(); i++) {
1638     CFGElement* s = _members.at(i);
1639     float freq = s->_freq;
1640     if (s->is_block()) {
1641       Block* b = s->as_Block();
1642       for (uint j = 0; j < b->_num_succs; j++) {
1643         Block* sb = b->_succs[j];
1644         update_succ_freq(sb, freq * b->succ_prob(j));
1645       }
1646     } else {
1647       CFGLoop* lp = s->as_CFGLoop();
1648       assert(lp->_parent == this, "immediate child");
1649       for (int k = 0; k < lp->_exits.length(); k++) {
1650         Block* eb = lp->_exits.at(k).get_target();
1651         float prob = lp->_exits.at(k).get_prob();
1652         update_succ_freq(eb, freq * prob);
1653       }
1654     }
1655   }
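  // Example (illustrative): a one-block loop whose head branches back to
  // itself with probability 0.9 and exits with probability 0.1 leaves the
  // head at relative frequency 1.0, records (exit_target, 0.1) in _exits
  // via update_succ_freq(), and defers the back-edge contribution to
  // scale_freq().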
1656 
1657   // For all loops other than the outer, "method" loop,
1658   // sum and normalize the exit probability. The "method" loop
1659   // should keep the initial exit probability of 1, so that
1660   // inner blocks do not get erroneously scaled.
1661   if (_depth != 0) {
1662     // Total the exit probabilities for this loop.
1663     float exits_sum = 0.0f;
1664     for (int i = 0; i < _exits.length(); i++) {
1665       exits_sum += _exits.at(i).get_prob();
1666     }
1667 
    // Normalize the exit probabilities. Until now, the
    // probabilities estimate the chance of exit per
    // single loop iteration; afterward, they estimate
    // the probability of exit per loop entry.
1672     for (int i = 0; i < _exits.length(); i++) {
1673       Block* et = _exits.at(i).get_target();
1674       float new_prob = 0.0f;
1675       if (_exits.at(i).get_prob() > 0.0f) {
1676         new_prob = _exits.at(i).get_prob() / exits_sum;
1677       }
1678       BlockProbPair bpp(et, new_prob);
1679       _exits.at_put(i, bpp);
1680     }
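    // Example (illustrative): two exits with per-iteration probabilities
    // 0.05 and 0.15 give exits_sum == 0.2 and normalize to 0.25 and 0.75;
    // the saved _exit_prob of 0.2 then implies an expected trip count of
    // roughly 1/0.2 == 5.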
1681 
1682     // Save the total, but guard against unreasonable probability,
1683     // as the value is used to estimate the loop trip count.
1684     // An infinite trip count would blur relative block
1685     // frequencies.
    if (exits_sum > 1.0f) exits_sum = 1.0f;
1687     if (exits_sum < PROB_MIN) exits_sum = PROB_MIN;
1688     _exit_prob = exits_sum;
1689   }
1690 }
1691 
1692 //------------------------------succ_prob-------------------------------------
1693 // Determine the probability of reaching successor 'i' from the receiver block.
1694 float Block::succ_prob(uint i) {
1695   int eidx = end_idx();
1696   Node *n = _nodes[eidx];  // Get ending Node
1697 
1698   int op = n->Opcode();
1699   if (n->is_Mach()) {
1700     if (n->is_MachNullCheck()) {
1701       // Can only reach here if called after lcm. The original Op_If is gone,
1702       // so we attempt to infer the probability from one or both of the
1703       // successor blocks.
1704       assert(_num_succs == 2, "expecting 2 successors of a null check");
1705       // If either successor has only one predecessor, then the
1706       // probability estimate can be derived using the
1707       // relative frequency of the successor and this block.
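      // Example (illustrative): if this block has _freq 2.0 and successor
      // i has a single predecessor and _freq 1.98, the estimate below is
      // 1.98/2.0 == 0.99.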
1708       if (_succs[i]->num_preds() == 2) {
1709         return _succs[i]->_freq / _freq;
1710       } else if (_succs[1-i]->num_preds() == 2) {
1711         return 1 - (_succs[1-i]->_freq / _freq);
1712       } else {
1713         // Estimate using both successor frequencies
1714         float freq = _succs[i]->_freq;
1715         return freq / (freq + _succs[1-i]->_freq);
1716       }
1717     }
1718     op = n->as_Mach()->ideal_Opcode();
1719   }
1720 
1722   // Switch on branch type
1723   switch( op ) {
1724   case Op_CountedLoopEnd:
1725   case Op_If: {
1726     assert (i < 2, "just checking");
1727     // Conditionals pass on only part of their frequency
1728     float prob  = n->as_MachIf()->_prob;
1729     assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
1730     // If succ[i] is the FALSE branch, invert path info
1731     if( _nodes[i + eidx + 1]->Opcode() == Op_IfFalse ) {
1732       return 1.0f - prob; // not taken
1733     } else {
1734       return prob; // taken
1735     }
1736   }
1737 
1738   case Op_Jump:
1739     // Divide the frequency between all successors evenly
1740     return 1.0f/_num_succs;
1741 
1742   case Op_Catch: {
1743     const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
1744     if (ci->_con == CatchProjNode::fall_through_index) {
1745       // Fall-thru path gets the lion's share.
1746       return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
1747     } else {
1748       // Presume exceptional paths are equally unlikely
1749       return PROB_UNLIKELY_MAG(5);
1750     }
1751   }
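  // (Illustrative: PROB_UNLIKELY_MAG(5) is 1e-5, so a Catch with one
  //  exceptional successor gives the fall-through path roughly 1 - 2e-5
  //  and the exceptional path 1e-5.)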
1752 
1753   case Op_Root:
1754   case Op_Goto:
1755     // Pass frequency straight thru to target
1756     return 1.0f;
1757 
1758   case Op_NeverBranch:
1759     return 0.0f;
1760 
1761   case Op_TailCall:
1762   case Op_TailJump:
1763   case Op_Return:
1764   case Op_Halt:
1765   case Op_Rethrow:
1766     // Do not push out freq to root block
1767     return 0.0f;
1768 
1769   default:
1770     ShouldNotReachHere();
1771   }
1772 
1773   return 0.0f;
1774 }
1775 
1776 //------------------------------num_fall_throughs-----------------------------
1777 // Return the number of fall-through candidates for a block
1778 int Block::num_fall_throughs() {
1779   int eidx = end_idx();
1780   Node *n = _nodes[eidx];  // Get ending Node
1781 
1782   int op = n->Opcode();
1783   if (n->is_Mach()) {
1784     if (n->is_MachNullCheck()) {
      // In theory, either side can fall through; for simplicity's sake,
      // let's say only the false branch can for now.
1787       return 1;
1788     }
1789     op = n->as_Mach()->ideal_Opcode();
1790   }
1791 
1792   // Switch on branch type
1793   switch( op ) {
1794   case Op_CountedLoopEnd:
1795   case Op_If:
1796     return 2;
1797 
1798   case Op_Root:
1799   case Op_Goto:
1800     return 1;
1801 
1802   case Op_Catch: {
1803     for (uint i = 0; i < _num_succs; i++) {
1804       const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
1805       if (ci->_con == CatchProjNode::fall_through_index) {
1806         return 1;
1807       }
1808     }
1809     return 0;
1810   }
1811 
1812   case Op_Jump:
1813   case Op_NeverBranch:
1814   case Op_TailCall:
1815   case Op_TailJump:
1816   case Op_Return:
1817   case Op_Halt:
1818   case Op_Rethrow:
1819     return 0;
1820 
1821   default:
1822     ShouldNotReachHere();
1823   }
1824 
1825   return 0;
1826 }
1827 
1828 //------------------------------succ_fall_through-----------------------------
// Return true if a specific successor could be a fall-through target.
1830 bool Block::succ_fall_through(uint i) {
1831   int eidx = end_idx();
1832   Node *n = _nodes[eidx];  // Get ending Node
1833 
1834   int op = n->Opcode();
1835   if (n->is_Mach()) {
1836     if (n->is_MachNullCheck()) {
      // In theory, either side can fall through; for simplicity's sake,
      // let's say only the false branch can for now.
1839       return _nodes[i + eidx + 1]->Opcode() == Op_IfFalse;
1840     }
1841     op = n->as_Mach()->ideal_Opcode();
1842   }
1843 
1844   // Switch on branch type
1845   switch( op ) {
1846   case Op_CountedLoopEnd:
1847   case Op_If:
1848   case Op_Root:
1849   case Op_Goto:
1850     return true;
1851 
1852   case Op_Catch: {
1853     const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
1854     return ci->_con == CatchProjNode::fall_through_index;
1855   }
1856 
1857   case Op_Jump:
1858   case Op_NeverBranch:
1859   case Op_TailCall:
1860   case Op_TailJump:
1861   case Op_Return:
1862   case Op_Halt:
1863   case Op_Rethrow:
1864     return false;
1865 
1866   default:
1867     ShouldNotReachHere();
1868   }
1869 
1870   return false;
1871 }
1872 
1873 //------------------------------update_uncommon_branch------------------------
// Update the probability of a two-way branch to be uncommon
1875 void Block::update_uncommon_branch(Block* ub) {
1876   int eidx = end_idx();
1877   Node *n = _nodes[eidx];  // Get ending Node
1878 
1879   int op = n->as_Mach()->ideal_Opcode();
1880 
  assert(op == Op_CountedLoopEnd || op == Op_If, "must be an If");
1882   assert(num_fall_throughs() == 2, "must be a two way branch block");
1883 
1884   // Which successor is ub?
1885   uint s;
  for (s = 0; s < _num_succs; s++) {
1887     if (_succs[s] == ub) break;
1888   }
1889   assert(s < 2, "uncommon successor must be found");
1890 
  // If ub is on the true path, make the probability small; if ub is on
  // the false path, make the probability large.
1893   bool invert = (_nodes[s + eidx + 1]->Opcode() == Op_IfFalse);
1894 
1895   // Get existing probability
1896   float p = n->as_MachIf()->_prob;
1897 
  if (invert) p = 1.0f - p;
  if (p > PROB_MIN) {
    p = PROB_MIN;
  }
  if (invert) p = 1.0f - p;
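  // Example (illustrative): if ub hangs off the IfFalse projection and
  // _prob is 0.8, p inverts to 0.2, clamps to PROB_MIN, and inverts back,
  // so the stored probability becomes 1 - PROB_MIN and the path to ub is
  // vanishingly rare.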
1903 
1904   n->as_MachIf()->_prob = p;
1905 }
1906 
1907 //------------------------------update_succ_freq-------------------------------
1908 // Update the appropriate frequency associated with block 'b', a successor of
1909 // a block in this loop.
1910 void CFGLoop::update_succ_freq(Block* b, float freq) {
1911   if (b->_loop == this) {
1912     if (b == head()) {
      // back branch within the loop
      // Do nothing now; the loop-carried frequency will be
      // adjusted later in scale_freq().
1916     } else {
1917       // simple branch within the loop
1918       b->_freq += freq;
1919     }
1920   } else if (!in_loop_nest(b)) {
1921     // branch is exit from this loop
1922     BlockProbPair bpp(b, freq);
1923     _exits.append(bpp);
1924   } else {
1925     // branch into nested loop
1926     CFGLoop* ch = b->_loop;
1927     ch->_freq += freq;
1928   }
1929 }
1930 
1931 //------------------------------in_loop_nest-----------------------------------
1932 // Determine if block b is in the receiver's loop nest.
1933 bool CFGLoop::in_loop_nest(Block* b) {
1934   int depth = _depth;
1935   CFGLoop* b_loop = b->_loop;
1936   int b_depth = b_loop->_depth;
1937   if (depth == b_depth) {
1938     return true;
1939   }
1940   while (b_depth > depth) {
1941     b_loop = b_loop->_parent;
1942     b_depth = b_loop->_depth;
1943   }
1944   return b_loop == this;
1945 }
1946 
1947 //------------------------------scale_freq-------------------------------------
// Scale frequency of loops and blocks by trip counts from outer loops.
// Do a top-down traversal of the loop tree (visit outer loops first).
1950 void CFGLoop::scale_freq() {
1951   float loop_freq = _freq * trip_count();
1952   _freq = loop_freq;
1953   for (int i = 0; i < _members.length(); i++) {
1954     CFGElement* s = _members.at(i);
1955     float block_freq = s->_freq * loop_freq;
1956     if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
1957       block_freq = MIN_BLOCK_FREQUENCY;
1958     s->_freq = block_freq;
1959   }
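  // Example (illustrative): a loop entered with frequency 0.5 and an
  // estimated trip count of 10 gets loop_freq == 5; a member block whose
  // relative frequency within the loop was 0.3 scales to an absolute 1.5.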
1960   CFGLoop* ch = _child;
1961   while (ch != NULL) {
1962     ch->scale_freq();
1963     ch = ch->_sibling;
1964   }
1965 }
1966 
1967 // Frequency of outer loop
1968 float CFGLoop::outer_loop_freq() const {
1969   if (_child != NULL) {
1970     return _child->_freq;
1971   }
1972   return _freq;
1973 }
1974 
1975 #ifndef PRODUCT
1976 //------------------------------dump_tree--------------------------------------
1977 void CFGLoop::dump_tree() const {
1978   dump();
1979   if (_child != NULL)   _child->dump_tree();
1980   if (_sibling != NULL) _sibling->dump_tree();
1981 }
1982 
1983 //------------------------------dump-------------------------------------------
1984 void CFGLoop::dump() const {
1985   for (int i = 0; i < _depth; i++) tty->print("   ");
1986   tty->print("%s: %d  trip_count: %6.0f freq: %6.0f\n",
1987              _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq);
1988   for (int i = 0; i < _depth; i++) tty->print("   ");
  tty->print("         members:");
1990   int k = 0;
1991   for (int i = 0; i < _members.length(); i++) {
1992     if (k++ >= 6) {
1993       tty->print("\n              ");
1994       for (int j = 0; j < _depth+1; j++) tty->print("   ");
1995       k = 0;
1996     }
1997     CFGElement *s = _members.at(i);
1998     if (s->is_block()) {
1999       Block *b = s->as_Block();
2000       tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq);
2001     } else {
2002       CFGLoop* lp = s->as_CFGLoop();
2003       tty->print(" L%d(%6.3f)", lp->_id, lp->_freq);
2004     }
2005   }
2006   tty->print("\n");
2007   for (int i = 0; i < _depth; i++) tty->print("   ");
2008   tty->print("         exits:  ");
2009   k = 0;
2010   for (int i = 0; i < _exits.length(); i++) {
2011     if (k++ >= 7) {
2012       tty->print("\n              ");
2013       for (int j = 0; j < _depth+1; j++) tty->print("   ");
2014       k = 0;
2015     }
2016     Block *blk = _exits.at(i).get_target();
2017     float prob = _exits.at(i).get_prob();
2018     tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
2019   }
2020   tty->print("\n");
2021 }
2022 #endif