/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/ad.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/movenode.hpp"
#include "opto/opcodes.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/type.hpp"
#include "opto/vectornode.hpp"
#include "runtime/os.hpp"

OptoReg::Name OptoReg::c_frame_pointer;

const RegMask *Matcher::idealreg2regmask[_last_machine_leaf];
RegMask Matcher::mreg2regmask[_last_Mach_Reg];
RegMask Matcher::STACK_ONLY_mask;
RegMask Matcher::c_frame_ptr_mask;
const uint Matcher::_begin_rematerialize = _BEGIN_REMATERIALIZE;
const uint Matcher::_end_rematerialize   = _END_REMATERIALIZE;

//---------------------------Matcher-------------------------------------------
Matcher::Matcher()
: PhaseTransform( Phase::Ins_Select ),
#ifdef ASSERT
  _old2new_map(C->comp_arena()),
  _new2old_map(C->comp_arena()),
#endif
  _shared_nodes(C->comp_arena()),
  _reduceOp(reduceOp), _leftOp(leftOp), _rightOp(rightOp),
  _swallowed(swallowed),
  _begin_inst_chain_rule(_BEGIN_INST_CHAIN_RULE),
  _end_inst_chain_rule(_END_INST_CHAIN_RULE),
  _must_clone(must_clone),
  _register_save_policy(register_save_policy),
  _c_reg_save_policy(c_reg_save_policy),
  _register_save_type(register_save_type),
  _ruleName(ruleName),
  _allocation_started(false),
  _states_arena(Chunk::medium_size),
  _visited(&_states_arena),
  _shared(&_states_arena),
  _dontcare(&_states_arena) {
  C->set_matcher(this);

  idealreg2spillmask  [Op_RegI] = NULL;
  idealreg2spillmask  [Op_RegN] = NULL;
  idealreg2spillmask  [Op_RegL] = NULL;
  idealreg2spillmask  [Op_RegF] = NULL;
  idealreg2spillmask  [Op_RegD] = NULL;
  idealreg2spillmask  [Op_RegP] = NULL;
  idealreg2spillmask  [Op_VecS] = NULL;
  idealreg2spillmask  [Op_VecD] = NULL;
  idealreg2spillmask  [Op_VecX] = NULL;
  idealreg2spillmask  [Op_VecY] = NULL;

  idealreg2debugmask  [Op_RegI] = NULL;
  idealreg2debugmask  [Op_RegN] = NULL;
  idealreg2debugmask  [Op_RegL] = NULL;
  idealreg2debugmask  [Op_RegF] = NULL;
  idealreg2debugmask  [Op_RegD] = NULL;
  idealreg2debugmask  [Op_RegP] = NULL;
  idealreg2debugmask  [Op_VecS] = NULL;
  idealreg2debugmask  [Op_VecD] = NULL;
  idealreg2debugmask  [Op_VecX] = NULL;
  idealreg2debugmask  [Op_VecY] = NULL;

  idealreg2mhdebugmask[Op_RegI] = NULL;
  idealreg2mhdebugmask[Op_RegN] = NULL;
  idealreg2mhdebugmask[Op_RegL] = NULL;
  idealreg2mhdebugmask[Op_RegF] = NULL;
  idealreg2mhdebugmask[Op_RegD] = NULL;
  idealreg2mhdebugmask[Op_RegP] = NULL;
  idealreg2mhdebugmask[Op_VecS] = NULL;
  idealreg2mhdebugmask[Op_VecD] = NULL;
  idealreg2mhdebugmask[Op_VecX] = NULL;
  idealreg2mhdebugmask[Op_VecY] = NULL;

  debug_only(_mem_node = NULL;)   // Ideal memory node consumed by mach node
}

//------------------------------warp_incoming_stk_arg------------------------
// This warps a VMReg into an OptoReg::Name
OptoReg::Name Matcher::warp_incoming_stk_arg( VMReg reg ) {
  OptoReg::Name warped;
  if( reg->is_stack() ) {  // Stack slot argument?
    warped = OptoReg::add(_old_SP, reg->reg2stack() );
    warped = OptoReg::add(warped, C->out_preserve_stack_slots());
    if( warped >= _in_arg_limit )
      _in_arg_limit = OptoReg::add(warped, 1); // Bump max stack slot seen
    if (!RegMask::can_represent_arg(warped)) {
      // the compiler cannot represent this method's calling sequence
      C->record_method_not_compilable_all_tiers("unsupported incoming calling sequence");
      return OptoReg::Bad;
    }
    return warped;
  }
  return OptoReg::as_OptoReg(reg);
}
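// Worked example (hypothetical values): an incoming argument at VMReg stack
// slot 3, with _old_SP biased to slot 8 and 2 out-preserve slots, warps to
// OptoReg slot 8 + 3 + 2 = 13, and _in_arg_limit is bumped to at least 14.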

//---------------------------compute_old_SP------------------------------------
OptoReg::Name Compile::compute_old_SP() {
  int fixed    = fixed_slots();
  int preserve = in_preserve_stack_slots();
  return OptoReg::stack2reg(round_to(fixed + preserve, Matcher::stack_alignment_in_slots()));
}
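// Worked example (hypothetical values): with fixed_slots() == 3,
// in_preserve_stack_slots() == 4 and a 2-slot stack alignment,
// round_to(3 + 4, 2) == 8, so the old SP is biased 8 slots above stack0().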



#ifdef ASSERT
void Matcher::verify_new_nodes_only(Node* xroot) {
  // Make sure that the new graph only references new nodes
  ResourceMark rm;
  Unique_Node_List worklist;
  VectorSet visited(Thread::current()->resource_area());
  worklist.push(xroot);
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    visited <<= n->_idx;
    assert(C->node_arena()->contains(n), "dead node");
    for (uint j = 0; j < n->req(); j++) {
      Node* in = n->in(j);
      if (in != NULL) {
        assert(C->node_arena()->contains(in), "dead node");
        if (!visited.test(in->_idx)) {
          worklist.push(in);
        }
      }
    }
  }
}
#endif


//---------------------------match---------------------------------------------
void Matcher::match( ) {
  if( MaxLabelRootDepth < 100 ) { // Too small?
    assert(false, "invalid MaxLabelRootDepth, increase it to 100 minimum");
    MaxLabelRootDepth = 100;
  }
  // One-time initialization of some register masks.
  init_spill_mask( C->root()->in(1) );
  _return_addr_mask = return_addr();
#ifdef _LP64
  // Pointers take 2 slots in 64-bit land
  _return_addr_mask.Insert(OptoReg::add(return_addr(),1));
#endif

  // Map a Java-signature return type into return register-value
  // machine registers for 0, 1 and 2 returned values.
  const TypeTuple *range = C->tf()->range();
  if( range->cnt() > TypeFunc::Parms ) { // If not a void function
    // Get ideal-register return type
    int ireg = range->field_at(TypeFunc::Parms)->ideal_reg();
    // Get machine return register
    uint sop = C->start()->Opcode();
    OptoRegPair regs = return_value(ireg, false);

    // And mask for same
    _return_value_mask = RegMask(regs.first());
    if( OptoReg::is_valid(regs.second()) )
      _return_value_mask.Insert(regs.second());
  }

  // ---------------
  // Frame Layout

  // Need the method signature to determine the incoming argument types,
  // because the types determine which registers the incoming arguments are
  // in, and this affects the matched code.
  const TypeTuple *domain = C->tf()->domain();
  uint             argcnt = domain->cnt() - TypeFunc::Parms;
  BasicType *sig_bt        = NEW_RESOURCE_ARRAY( BasicType, argcnt );
  VMRegPair *vm_parm_regs  = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
  _parm_regs               = NEW_RESOURCE_ARRAY( OptoRegPair, argcnt );
  _calling_convention_mask = NEW_RESOURCE_ARRAY( RegMask, argcnt );
  uint i;
  for( i = 0; i<argcnt; i++ ) {
    sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
  }

  // Pass array of ideal registers and length to USER code (from the AD file)
  // that will convert this to an array of register numbers.
  const StartNode *start = C->start();
  start->calling_convention( sig_bt, vm_parm_regs, argcnt );
#ifdef ASSERT
  // Sanity check users' calling convention.  Real handy while trying to
  // get the initial port correct.
  { for (uint i = 0; i<argcnt; i++) {
      if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
        assert(domain->field_at(i+TypeFunc::Parms)==Type::HALF, "only allowed on halves" );
        _parm_regs[i].set_bad();
        continue;
      }
      VMReg parm_reg = vm_parm_regs[i].first();
      assert(parm_reg->is_valid(), "invalid arg?");
      if (parm_reg->is_reg()) {
        OptoReg::Name opto_parm_reg = OptoReg::as_OptoReg(parm_reg);
        assert(can_be_java_arg(opto_parm_reg) ||
               C->stub_function() == CAST_FROM_FN_PTR(address, OptoRuntime::rethrow_C) ||
               opto_parm_reg == inline_cache_reg(),
               "parameters in register must be preserved by runtime stubs");
      }
      for (uint j = 0; j < i; j++) {
        assert(parm_reg != vm_parm_regs[j].first(),
               "calling conv. must produce distinct regs");
      }
    }
  }
#endif

  // Do some initial frame layout.

  // Compute the old incoming SP (may be called FP) as
  //   OptoReg::stack0() + locks + in_preserve_stack_slots + pad2.
  _old_SP = C->compute_old_SP();
  assert( is_even(_old_SP), "must be even" );

  // Compute highest incoming stack argument as
  //   _old_SP + out_preserve_stack_slots + incoming argument size.
  _in_arg_limit = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  assert( is_even(_in_arg_limit), "out_preserve must be even" );
  for( i = 0; i < argcnt; i++ ) {
    // Permit args to have no register
    _calling_convention_mask[i].Clear();
    if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
      continue;
    }
    // calling_convention returns stack arguments as a count of
    // slots beyond OptoReg::stack0()/VMRegImpl::stack0.  We need to convert this to
    // the allocator's point of view, taking into account all the
    // preserve area, locks & pad2.

    OptoReg::Name reg1 = warp_incoming_stk_arg(vm_parm_regs[i].first());
    if( OptoReg::is_valid(reg1))
      _calling_convention_mask[i].Insert(reg1);

    OptoReg::Name reg2 = warp_incoming_stk_arg(vm_parm_regs[i].second());
    if( OptoReg::is_valid(reg2))
      _calling_convention_mask[i].Insert(reg2);

    // Save the biased stack-slot register numbers
    _parm_regs[i].set_pair(reg2, reg1);
  }

  // Finally, make sure the incoming arguments take up an even number of
  // words, in case the arguments or locals need to contain doubleword stack
  // slots.  The rest of the system assumes that stack slot pairs (in
  // particular, in the spill area) which look aligned will in fact be
  // aligned relative to the stack pointer in the target machine.  Double
  // stack slots will always be allocated aligned.
  _new_SP = OptoReg::Name(round_to(_in_arg_limit, RegMask::SlotsPerLong));
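  // For example (hypothetical values): with _in_arg_limit at slot 11 and
  // RegMask::SlotsPerLong == 2, _new_SP is rounded up to slot 12 so that
  // doubleword stack slots stay pair-aligned.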

  // Compute highest outgoing stack argument as
  //   _new_SP + out_preserve_stack_slots + max(outgoing argument size).
  _out_arg_limit = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
  assert( is_even(_out_arg_limit), "out_preserve must be even" );

  if (!RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1))) {
    // the compiler cannot represent this method's calling sequence
    C->record_method_not_compilable("must be able to represent all call arguments in reg mask");
  }

  if (C->failing())  return;  // bailed out on incoming arg failure

  // ---------------
  // Collect roots of matcher trees.  Every node for which
  // _shared[_idx] is cleared is guaranteed to not be shared, and thus
  // can be a valid interior of some tree.
  find_shared( C->root() );
  find_shared( C->top() );

  C->print_method(PHASE_BEFORE_MATCHING);

  // Create a new ideal node ConP #NULL even if one already exists in old
  // space, to avoid false sharing if the corresponding mach node is not used.
  // The corresponding mach node is only used in rare cases for derived
  // pointers.
  Node* new_ideal_null = ConNode::make(C, TypePtr::NULL_PTR);

  // Swap out to old-space, emptying new-space
  Arena *old = C->node_arena()->move_contents(C->old_arena());

  // Save debug and profile information for nodes in old space:
  _old_node_note_array = C->node_note_array();
  if (_old_node_note_array != NULL) {
    C->set_node_note_array(new(C->comp_arena()) GrowableArray<Node_Notes*>
                           (C->comp_arena(), _old_node_note_array->length(),
                            0, NULL));
  }

  // Pre-size the new_node table to avoid the need for range checks.
  grow_new_node_array(C->unique());

  // Reset node counter so MachNodes start with _idx at 0
  int nodes = C->unique(); // save value
  C->set_unique(0);
  C->reset_dead_node_list();

  // Recursively match trees from old space into new space.
  // Correct leaves of new-space Nodes; they point to old-space.
  _visited.Clear();             // Clear visit bits for xform call
  C->set_cached_top_node(xform( C->top(), nodes ));
  if (!C->failing()) {
    Node* xroot =        xform( C->root(), 1 );
    if (xroot == NULL) {
      Matcher::soft_match_failure();  // recursive matching process failed
      C->record_method_not_compilable("instruction match failed");
    } else {
      // During matching shared constants were attached to C->root()
      // because xroot wasn't available yet, so transfer the uses to
      // the xroot.
      for( DUIterator_Fast jmax, j = C->root()->fast_outs(jmax); j < jmax; j++ ) {
        Node* n = C->root()->fast_out(j);
        if (C->node_arena()->contains(n)) {
          assert(n->in(0) == C->root(), "should be control user");
          n->set_req(0, xroot);
          --j;
          --jmax;
        }
      }

      // Generate new mach node for ConP #NULL
      assert(new_ideal_null != NULL, "sanity");
      _mach_null = match_tree(new_ideal_null);
      // Don't set control, it will confuse GCM since there are no uses.
      // The control will be set when this node is used for the first time
      // in find_base_for_derived().
      assert(_mach_null != NULL, "");

      C->set_root(xroot->is_Root() ? xroot->as_Root() : NULL);

#ifdef ASSERT
      verify_new_nodes_only(xroot);
#endif
    }
  }
  if (C->top() == NULL || C->root() == NULL) {
    C->record_method_not_compilable("graph lost"); // %%% cannot happen?
  }
  if (C->failing()) {
    // delete old;
    old->destruct_contents();
    return;
  }
  assert( C->top(), "" );
  assert( C->root(), "" );
  validate_null_checks();

  // Now smoke old-space
  NOT_DEBUG( old->destruct_contents() );

  // ------------------------
  // Set up save-on-entry registers
  Fixup_Save_On_Entry( );
}


//------------------------------Fixup_Save_On_Entry----------------------------
// The stated purpose of this routine is to take care of save-on-entry
// registers.  However, the overall goal of the Match phase is to convert into
// machine-specific instructions which have RegMasks to guide allocation.
// So what this procedure really does is put a valid RegMask on each input
// to the machine-specific variations of all Return, TailCall and Halt
// instructions.  It also adds edges to define the save-on-entry values (and of
// course gives them a mask).

static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
  RegMask *rms = NEW_RESOURCE_ARRAY( RegMask, size );
  // Do all the pre-defined register masks
  rms[TypeFunc::Control  ] = RegMask::Empty;
  rms[TypeFunc::I_O      ] = RegMask::Empty;
  rms[TypeFunc::Memory   ] = RegMask::Empty;
  rms[TypeFunc::ReturnAdr] = ret_adr;
  rms[TypeFunc::FramePtr ] = fp;
  return rms;
}
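// The returned array is indexed by the usual TypeFunc edge numbers; the
// caller (Fixup_Save_On_Entry) fills in the Parms slots and then appends one
// mask per save-on-entry register after its edge count.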

//---------------------------init_first_stack_mask-----------------------------
// Create the initial stack mask used by values spilling to the stack.
// Disallow any debug info in outgoing argument areas by setting the
// initial mask accordingly.
void Matcher::init_first_stack_mask() {

  // Allocate storage for spill masks as masks for the appropriate load type.
  RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask) * (3*6+4));

  idealreg2spillmask  [Op_RegN] = &rms[0];
  idealreg2spillmask  [Op_RegI] = &rms[1];
  idealreg2spillmask  [Op_RegL] = &rms[2];
  idealreg2spillmask  [Op_RegF] = &rms[3];
  idealreg2spillmask  [Op_RegD] = &rms[4];
  idealreg2spillmask  [Op_RegP] = &rms[5];

  idealreg2debugmask  [Op_RegN] = &rms[6];
  idealreg2debugmask  [Op_RegI] = &rms[7];
  idealreg2debugmask  [Op_RegL] = &rms[8];
  idealreg2debugmask  [Op_RegF] = &rms[9];
  idealreg2debugmask  [Op_RegD] = &rms[10];
  idealreg2debugmask  [Op_RegP] = &rms[11];

  idealreg2mhdebugmask[Op_RegN] = &rms[12];
  idealreg2mhdebugmask[Op_RegI] = &rms[13];
  idealreg2mhdebugmask[Op_RegL] = &rms[14];
  idealreg2mhdebugmask[Op_RegF] = &rms[15];
  idealreg2mhdebugmask[Op_RegD] = &rms[16];
  idealreg2mhdebugmask[Op_RegP] = &rms[17];

  idealreg2spillmask  [Op_VecS] = &rms[18];
  idealreg2spillmask  [Op_VecD] = &rms[19];
  idealreg2spillmask  [Op_VecX] = &rms[20];
  idealreg2spillmask  [Op_VecY] = &rms[21];

  OptoReg::Name i;

  // At first, start with the empty mask
  C->FIRST_STACK_mask().Clear();

  // Add in the incoming argument area
  OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  for (i = init_in; i < _in_arg_limit; i = OptoReg::add(i,1)) {
    C->FIRST_STACK_mask().Insert(i);
  }
  // Add in all bits past the outgoing argument area
  guarantee(RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1)),
            "must be able to represent all call arguments in reg mask");
  OptoReg::Name init = _out_arg_limit;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1)) {
    C->FIRST_STACK_mask().Insert(i);
  }
  // Finally, set the "infinite stack" bit.
  C->FIRST_STACK_mask().set_AllStack();
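  // Schematically, FIRST_STACK_mask now holds:
  //   [incoming argument slots] .. gap .. [slots from _out_arg_limit up to
  //   the mask's capacity] + the AllStack bit (the "infinite" stack tail).
  // The outgoing argument area stays excluded so spilled values can never
  // land where call arguments will be stored.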

  // Make spill masks.  Registers for their class, plus FIRST_STACK_mask.
  RegMask aligned_stack_mask = C->FIRST_STACK_mask();
  // Keep spill masks aligned.
  aligned_stack_mask.clear_to_pairs();
  assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");

  *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP];
#ifdef _LP64
  *idealreg2spillmask[Op_RegN] = *idealreg2regmask[Op_RegN];
   idealreg2spillmask[Op_RegN]->OR(C->FIRST_STACK_mask());
   idealreg2spillmask[Op_RegP]->OR(aligned_stack_mask);
#else
   idealreg2spillmask[Op_RegP]->OR(C->FIRST_STACK_mask());
#endif
  *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI];
   idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL];
   idealreg2spillmask[Op_RegL]->OR(aligned_stack_mask);
  *idealreg2spillmask[Op_RegF] = *idealreg2regmask[Op_RegF];
   idealreg2spillmask[Op_RegF]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegD] = *idealreg2regmask[Op_RegD];
   idealreg2spillmask[Op_RegD]->OR(aligned_stack_mask);

  if (Matcher::vector_size_supported(T_BYTE,4)) {
    *idealreg2spillmask[Op_VecS] = *idealreg2regmask[Op_VecS];
     idealreg2spillmask[Op_VecS]->OR(C->FIRST_STACK_mask());
  }
  if (Matcher::vector_size_supported(T_FLOAT,2)) {
    // For VecD we need dual alignment and 8 bytes (2 slots) for spills.
    // RA guarantees such alignment since it is needed for Double and Long values.
    *idealreg2spillmask[Op_VecD] = *idealreg2regmask[Op_VecD];
     idealreg2spillmask[Op_VecD]->OR(aligned_stack_mask);
  }
  if (Matcher::vector_size_supported(T_FLOAT,4)) {
    // For VecX we need 4-slot alignment and 16 bytes (4 slots) for spills.
    //
    // The RA can use the input arguments' stack slots for spills, but until
    // RA runs we don't know the frame size or the offsets of the input-arg
    // stack slots.
    //
    // Exclude the last input-arg stack slots to avoid spilling vectors there;
    // otherwise vector spills could stomp over stack slots in the caller's
    // frame.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecX); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
    aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX);
    assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecX] = *idealreg2regmask[Op_VecX];
     idealreg2spillmask[Op_VecX]->OR(aligned_stack_mask);
  }
  if (Matcher::vector_size_supported(T_FLOAT,8)) {
    // For VecY we need 8-slot alignment and 32 bytes (8 slots) for spills.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecY); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
    aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
    assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY];
     idealreg2spillmask[Op_VecY]->OR(aligned_stack_mask);
  }
  if (UseFPUForSpilling) {
    // This mask logic assumes that the spill operations are
    // symmetric and that the registers involved are the same size.
    // On SPARC, for instance, we may have to use 64-bit moves that
    // will kill 2 registers when used with F0-F31.
    idealreg2spillmask[Op_RegI]->OR(*idealreg2regmask[Op_RegF]);
    idealreg2spillmask[Op_RegF]->OR(*idealreg2regmask[Op_RegI]);
#ifdef _LP64
    idealreg2spillmask[Op_RegN]->OR(*idealreg2regmask[Op_RegF]);
    idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
    idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
    idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegD]);
#else
    idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegF]);
#ifdef ARM
    // ARM has support for moving 64-bit values between a pair of
    // integer registers and a double register
    idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
    idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
#endif
#endif
  }

  // Make up debug masks.  Any spill slot plus callee-save registers.
  // Caller-save registers are assumed to be trashable by the various
  // inline-cache fixup routines.
  *idealreg2debugmask  [Op_RegN]= *idealreg2spillmask[Op_RegN];
  *idealreg2debugmask  [Op_RegI]= *idealreg2spillmask[Op_RegI];
  *idealreg2debugmask  [Op_RegL]= *idealreg2spillmask[Op_RegL];
  *idealreg2debugmask  [Op_RegF]= *idealreg2spillmask[Op_RegF];
  *idealreg2debugmask  [Op_RegD]= *idealreg2spillmask[Op_RegD];
  *idealreg2debugmask  [Op_RegP]= *idealreg2spillmask[Op_RegP];

  *idealreg2mhdebugmask[Op_RegN]= *idealreg2spillmask[Op_RegN];
  *idealreg2mhdebugmask[Op_RegI]= *idealreg2spillmask[Op_RegI];
  *idealreg2mhdebugmask[Op_RegL]= *idealreg2spillmask[Op_RegL];
  *idealreg2mhdebugmask[Op_RegF]= *idealreg2spillmask[Op_RegF];
  *idealreg2mhdebugmask[Op_RegD]= *idealreg2spillmask[Op_RegD];
  *idealreg2mhdebugmask[Op_RegP]= *idealreg2spillmask[Op_RegP];

  // Prevent stub compilations from attempting to reference
  // callee-saved registers from debug info
  bool exclude_soe = !Compile::current()->is_method_compilation();

  for( i=OptoReg::Name(0); i<OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i,1) ) {
    // registers the caller has to save do not work
    if( _register_save_policy[i] == 'C' ||
        _register_save_policy[i] == 'A' ||
        (_register_save_policy[i] == 'E' && exclude_soe) ) {
      idealreg2debugmask  [Op_RegN]->Remove(i);
      idealreg2debugmask  [Op_RegI]->Remove(i); // Exclude save-on-call
      idealreg2debugmask  [Op_RegL]->Remove(i); // registers from debug
      idealreg2debugmask  [Op_RegF]->Remove(i); // masks
      idealreg2debugmask  [Op_RegD]->Remove(i);
      idealreg2debugmask  [Op_RegP]->Remove(i);

      idealreg2mhdebugmask[Op_RegN]->Remove(i);
      idealreg2mhdebugmask[Op_RegI]->Remove(i);
      idealreg2mhdebugmask[Op_RegL]->Remove(i);
      idealreg2mhdebugmask[Op_RegF]->Remove(i);
      idealreg2mhdebugmask[Op_RegD]->Remove(i);
      idealreg2mhdebugmask[Op_RegP]->Remove(i);
    }
  }

  // Subtract the register we use to save the SP for MethodHandle
  // invokes from the debug mask.
  const RegMask save_mask = method_handle_invoke_SP_save_mask();
  idealreg2mhdebugmask[Op_RegN]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegI]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegL]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegF]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegD]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegP]->SUBTRACT(save_mask);
}
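// In summary: each debug mask is the matching spill mask minus caller-save
// (and, for stub compilations, save-on-entry) registers, and each mhdebug
// mask additionally excludes the register used to save the SP across
// MethodHandle invokes.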

//---------------------------is_save_on_entry----------------------------------
bool Matcher::is_save_on_entry( int reg ) {
  return
    _register_save_policy[reg] == 'E' ||
    _register_save_policy[reg] == 'A' || // Save-on-entry register?
    // Also save argument registers in the trampolining stubs
    (C->save_argument_registers() && is_spillable_arg(reg));
}

//---------------------------Fixup_Save_On_Entry-------------------------------
void Matcher::Fixup_Save_On_Entry( ) {
  init_first_stack_mask();

  Node *root = C->root();       // Short name for root
  // Count number of save-on-entry registers.
  uint soe_cnt = number_of_saved_registers();
  uint i;

  // Find the procedure Start Node
  StartNode *start = C->start();
  assert( start, "Expect a start node" );

  // Save argument registers in the trampolining stubs
  if( C->save_argument_registers() )
    for( i = 0; i < _last_Mach_Reg; i++ )
      if( is_spillable_arg(i) )
        soe_cnt++;

  // Input RegMask array shared by all Returns.
  // The type for doubles and longs has a count of 2, but
  // there is only 1 returned value
  uint ret_edge_cnt = TypeFunc::Parms + ((C->tf()->range()->cnt() == TypeFunc::Parms) ? 0 : 1);
  RegMask *ret_rms  = init_input_masks( ret_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  // Returns have 0 or 1 returned values depending on call signature.
  // Return register is specified by return_value in the AD file.
  if (ret_edge_cnt > TypeFunc::Parms)
    ret_rms[TypeFunc::Parms+0] = _return_value_mask;

  // Input RegMask array shared by all Rethrows.
  uint reth_edge_cnt = TypeFunc::Parms+1;
  RegMask *reth_rms  = init_input_masks( reth_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  // Rethrow takes only the exception oop, passed in the argument-0 slot.
  reth_rms[TypeFunc::Parms] = mreg2regmask[find_receiver(false)];
#ifdef _LP64
  // Need two slots for ptrs in 64-bit land
  reth_rms[TypeFunc::Parms].Insert(OptoReg::add(OptoReg::Name(find_receiver(false)),1));
#endif

  // Input RegMask array shared by all TailCalls
  uint tail_call_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_call_rms = init_input_masks( tail_call_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Input RegMask array shared by all TailJumps
  uint tail_jump_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_jump_rms = init_input_masks( tail_jump_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // TailCalls have 2 returned values (target & moop), whose masks come
  // from the usual MachNode/MachOper mechanism.  Find a sample
  // TailCall to extract these masks and put the correct masks into
  // the tail_call_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailCall ) {
      tail_call_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_call_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }

  // TailJumps have 2 returned values (target & ex_oop), whose masks come
  // from the usual MachNode/MachOper mechanism.  Find a sample
  // TailJump to extract these masks and put the correct masks into
  // the tail_jump_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailJump ) {
      tail_jump_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_jump_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }

  // Input RegMask array shared by all Halts
  uint halt_edge_cnt = TypeFunc::Parms;
  RegMask *halt_rms = init_input_masks( halt_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Capture the return input masks into each exit flavor
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *exit = root->in(i)->as_MachReturn();
    switch( exit->ideal_Opcode() ) {
      case Op_Return   : exit->_in_rms = ret_rms;  break;
      case Op_Rethrow  : exit->_in_rms = reth_rms; break;
      case Op_TailCall : exit->_in_rms = tail_call_rms; break;
      case Op_TailJump : exit->_in_rms = tail_jump_rms; break;
      case Op_Halt     : exit->_in_rms = halt_rms; break;
      default          : ShouldNotReachHere();
    }
  }

  // Next unused projection number from Start.
  int proj_cnt = C->tf()->domain()->cnt();

  // Do all the save-on-entry registers.  Make projections from Start for
  // them, and give them a use at the exit points.  To the allocator, they
  // look like incoming register arguments.
  for( i = 0; i < _last_Mach_Reg; i++ ) {
    if( is_save_on_entry(i) ) {

      // Add the save-on-entry to the mask array
      ret_rms      [      ret_edge_cnt] = mreg2regmask[i];
      reth_rms     [     reth_edge_cnt] = mreg2regmask[i];
      tail_call_rms[tail_call_edge_cnt] = mreg2regmask[i];
      tail_jump_rms[tail_jump_edge_cnt] = mreg2regmask[i];
      // Halts need the SOE registers, but only in the stack as debug info.
      // A just-prior uncommon-trap or deoptimization will use the SOE regs.
      halt_rms     [     halt_edge_cnt] = *idealreg2spillmask[_register_save_type[i]];

      Node *mproj;

      // Is this a RegF low half of a RegD?  Double up 2 adjacent RegF's
      // into a single RegD.
      if( (i&1) == 0 &&
          _register_save_type[i  ] == Op_RegF &&
          _register_save_type[i+1] == Op_RegF &&
          is_save_on_entry(i+1) ) {
        // Add other bit for double
        ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
        reth_rms     [     reth_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
        halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
        mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegD );
        proj_cnt += 2;          // Skip 2 for doubles
      }
      else if( (i&1) == 1 &&    // Else check for high half of double
               _register_save_type[i-1] == Op_RegF &&
               _register_save_type[i  ] == Op_RegF &&
               is_save_on_entry(i-1) ) {
        ret_rms      [      ret_edge_cnt] = RegMask::Empty;
        reth_rms     [     reth_edge_cnt] = RegMask::Empty;
        tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
        tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
        halt_rms     [     halt_edge_cnt] = RegMask::Empty;
        mproj = C->top();
      }
      // Is this a RegI low half of a RegL?  Double up 2 adjacent RegI's
      // into a single RegL.
      else if( (i&1) == 0 &&
               _register_save_type[i  ] == Op_RegI &&
               _register_save_type[i+1] == Op_RegI &&
               is_save_on_entry(i+1) ) {
        // Add other bit for long
        ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
        reth_rms     [     reth_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
        halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
        mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegL );
        proj_cnt += 2;          // Skip 2 for longs
      }
      else if( (i&1) == 1 &&    // Else check for high half of long
               _register_save_type[i-1] == Op_RegI &&
               _register_save_type[i  ] == Op_RegI &&
               is_save_on_entry(i-1) ) {
        ret_rms      [      ret_edge_cnt] = RegMask::Empty;
        reth_rms     [     reth_edge_cnt] = RegMask::Empty;
        tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
        tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
        halt_rms     [     halt_edge_cnt] = RegMask::Empty;
        mproj = C->top();
      } else {
        // Make a projection for it off the Start
        mproj = new MachProjNode( start, proj_cnt++, ret_rms[ret_edge_cnt], _register_save_type[i] );
      }

      ret_edge_cnt ++;
      reth_edge_cnt ++;
      tail_call_edge_cnt ++;
      tail_jump_edge_cnt ++;
      halt_edge_cnt ++;

      // Add a use of the SOE register to all exit paths
      for( uint j=1; j < root->req(); j++ )
        root->in(j)->add_req(mproj);
    } // End of if a save-on-entry register
  } // End of for all machine registers
}
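// On return from Fixup_Save_On_Entry, every exit node (Return, Rethrow,
// TailCall, TailJump, Halt) carries one extra input per save-on-entry
// register, each constrained by a RegMask, so to the register allocator the
// SOE registers look like ordinary values live from Start to every exit.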

//------------------------------init_spill_mask--------------------------------
void Matcher::init_spill_mask( Node *ret ) {
  if( idealreg2regmask[Op_RegI] ) return; // One time only init

  OptoReg::c_frame_pointer = c_frame_pointer();
  c_frame_ptr_mask = c_frame_pointer();
#ifdef _LP64
  // pointers are twice as big
  c_frame_ptr_mask.Insert(OptoReg::add(c_frame_pointer(),1));
#endif

  // Start at OptoReg::stack0()
  STACK_ONLY_mask.Clear();
  OptoReg::Name init = OptoReg::stack2reg(0);
  // STACK_ONLY_mask is all stack bits
  OptoReg::Name i;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1))
    STACK_ONLY_mask.Insert(i);
  // Also set the "infinite stack" bit.
  STACK_ONLY_mask.set_AllStack();

  // Copy the register names over into the shared world
  for( i=OptoReg::Name(0); i<OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i,1) ) {
    // SharedInfo::regName[i] = regName[i];
    // Handy RegMasks per machine register
    mreg2regmask[i].Insert(i);
  }

  // Grab the Frame Pointer
  Node *fp  = ret->in(TypeFunc::FramePtr);
  Node *mem = ret->in(TypeFunc::Memory);
  const TypePtr* atp = TypePtr::BOTTOM;
  // Share frame pointer while making spill ops
  set_shared(fp);

  // Compute generic short-offset Loads
#ifdef _LP64
  MachNode *spillCP = match_tree(new LoadNNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM,MemNode::unordered));
#endif
  MachNode *spillI  = match_tree(new LoadINode(NULL,mem,fp,atp,TypeInt::INT,MemNode::unordered));
  MachNode *spillL  = match_tree(new LoadLNode(NULL,mem,fp,atp,TypeLong::LONG,MemNode::unordered,false));
  MachNode *spillF  = match_tree(new LoadFNode(NULL,mem,fp,atp,Type::FLOAT,MemNode::unordered));
  MachNode *spillD  = match_tree(new LoadDNode(NULL,mem,fp,atp,Type::DOUBLE,MemNode::unordered));
  MachNode *spillP  = match_tree(new LoadPNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM,MemNode::unordered));
  assert(spillI != NULL && spillL != NULL && spillF != NULL &&
         spillD != NULL && spillP != NULL, "");
  // Get the ADLC notion of the right regmask, for each basic type.
#ifdef _LP64
  idealreg2regmask[Op_RegN] = &spillCP->out_RegMask();
#endif
  idealreg2regmask[Op_RegI] = &spillI->out_RegMask();
  idealreg2regmask[Op_RegL] = &spillL->out_RegMask();
  idealreg2regmask[Op_RegF] = &spillF->out_RegMask();
  idealreg2regmask[Op_RegD] = &spillD->out_RegMask();
  idealreg2regmask[Op_RegP] = &spillP->out_RegMask();

  // Vector regmasks.
  if (Matcher::vector_size_supported(T_BYTE,4)) {
    TypeVect::VECTS = TypeVect::make(T_BYTE, 4);
    MachNode *spillVectS = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTS));
    idealreg2regmask[Op_VecS] = &spillVectS->out_RegMask();
  }
  if (Matcher::vector_size_supported(T_FLOAT,2)) {
    MachNode *spillVectD = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTD));
    idealreg2regmask[Op_VecD] = &spillVectD->out_RegMask();
  }
  if (Matcher::vector_size_supported(T_FLOAT,4)) {
    MachNode *spillVectX = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTX));
    idealreg2regmask[Op_VecX] = &spillVectX->out_RegMask();
  }
  if (Matcher::vector_size_supported(T_FLOAT,8)) {
    MachNode *spillVectY = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTY));
    idealreg2regmask[Op_VecY] = &spillVectY->out_RegMask();
  }
}
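// After this one-time setup, idealreg2regmask[ideal_reg] answers "which
// machine registers can hold a value of this ideal type", as defined by the
// AD file's load rules; e.g. on a typical port idealreg2regmask[Op_RegI]
// would be the set of allocatable general-purpose registers.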

#ifdef ASSERT
static void match_alias_type(Compile* C, Node* n, Node* m) {
  if (!VerifyAliases)  return;  // do not go looking for trouble by default
  const TypePtr* nat = n->adr_type();
  const TypePtr* mat = m->adr_type();
  int nidx = C->get_alias_index(nat);
  int midx = C->get_alias_index(mat);
  // Detune the assert for cases like (AndI 0xFF (LoadB p)).
  if (nidx == Compile::AliasIdxTop && midx >= Compile::AliasIdxRaw) {
    for (uint i = 1; i < n->req(); i++) {
      Node* n1 = n->in(i);
      const TypePtr* n1at = n1->adr_type();
      if (n1at != NULL) {
        nat = n1at;
        nidx = C->get_alias_index(n1at);
      }
    }
  }
  // %%% Kludgery.  Instead, fix ideal adr_type methods for all these cases:
  if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxRaw) {
    switch (n->Opcode()) {
    case Op_PrefetchRead:
    case Op_PrefetchWrite:
    case Op_PrefetchAllocation:
      nidx = Compile::AliasIdxRaw;
      nat = TypeRawPtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxRaw && midx == Compile::AliasIdxTop) {
    switch (n->Opcode()) {
    case Op_ClearArray:
      midx = Compile::AliasIdxRaw;
      mat = TypeRawPtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxBot) {
    switch (n->Opcode()) {
    case Op_Return:
    case Op_Rethrow:
    case Op_Halt:
    case Op_TailCall:
    case Op_TailJump:
      nidx = Compile::AliasIdxBot;
      nat = TypePtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxBot && midx == Compile::AliasIdxTop) {
    switch (n->Opcode()) {
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_AryEq:
    case Op_MemBarVolatile:
    case Op_MemBarCPUOrder: // %%% these ideals should have narrower adr_type?
    case Op_EncodeISOArray:
      nidx = Compile::AliasIdxTop;
      nat = NULL;
      break;
    }
  }
  if (nidx != midx) {
    if (PrintOpto || (PrintMiscellaneous && (WizardMode || Verbose))) {
      tty->print_cr("==== Matcher alias shift %d => %d", nidx, midx);
      n->dump();
      m->dump();
    }
    assert(C->subsume_loads() && C->must_alias(nat, midx),
           "must not lose alias info when matching");
  }
}
#endif


//------------------------------MStack-----------------------------------------
// State and MStack class used in xform() and find_shared() iterative methods.
enum Node_State { Pre_Visit,  // node has to be pre-visited
                      Visit,  // visit node
                 Post_Visit,  // post-visit node
             Alt_Post_Visit   // alternative post-visit path
                };

class MStack: public Node_Stack {
  public:
    MStack(int size) : Node_Stack(size) { }

    void push(Node *n, Node_State ns) {
      Node_Stack::push(n, (uint)ns);
    }
    void push(Node *n, Node_State ns, Node *parent, int indx) {
      ++_inode_top;
      if ((_inode_top + 1) >= _inode_max) grow();
      _inode_top->node = parent;
      _inode_top->indx = (uint)indx;
      ++_inode_top;
      _inode_top->node = n;
      _inode_top->indx = (uint)ns;
    }
    Node *parent() {
      pop();
      return node();
    }
    Node_State state() const {
      return (Node_State)index();
    }
    void set_state(Node_State ns) {
      set_index((uint)ns);
    }
};
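// Usage sketch (see xform() below): each stack entry pairs a child node with
// its visit state, preceded by the parent node and the input index to patch:
//   MStack mstack(C->unique() * 2 * 2);
//   mstack.push(root, Visit, NULL, -1);  // NULL parent marks the root
//   while (mstack.is_nonempty()) { /* dispatch on mstack.state() */ }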


//------------------------------xform------------------------------------------
// Given a Node in old-space, Match him (Label/Reduce) to produce a machine
// Node in new-space.  Given a new-space Node, recursively walk his children.
Node *Matcher::transform( Node *n ) { ShouldNotCallThis(); return n; }
Node *Matcher::xform( Node *n, int max_stack ) {
  // Use one stack to keep both: child's node/state and parent's node/index
  MStack mstack(max_stack * 2 * 2); // C->unique() * 2 * 2
  mstack.push(n, Visit, NULL, -1);  // set NULL as parent to indicate root

  while (mstack.is_nonempty()) {
    C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions");
    if (C->failing()) return NULL;
    n = mstack.node();          // Leave node on stack
    Node_State nstate = mstack.state();
    if (nstate == Visit) {
      mstack.set_state(Post_Visit);
      Node *oldn = n;
      // Old-space or new-space check
      if (!C->node_arena()->contains(n)) {
        // Old space!
        Node* m;
        if (has_new_node(n)) {  // Not yet Label/Reduced
          m = new_node(n);
        } else {
          if (!is_dontcare(n)) { // Matcher can match this guy
            // Calls match special.  They match alone with no children.
            // Their children, the incoming arguments, match normally.
            m = n->is_SafePoint() ? match_sfpt(n->as_SafePoint()):match_tree(n);
            if (C->failing())  return NULL;
            if (m == NULL) { Matcher::soft_match_failure(); return NULL; }
          } else {                  // Nothing the matcher cares about
            if( n->is_Proj() && n->in(0)->is_Multi()) {       // Projections?
              // Convert to machine-dependent projection
              m = n->in(0)->as_Multi()->match( n->as_Proj(), this );
#ifdef ASSERT
              _new2old_map.map(m->_idx, n);
#endif
              if (m->in(0) != NULL) // m might be top
                collect_null_checks(m, n);
            } else {                // Else just a regular 'ol guy
              m = n->clone();       // So just clone into new-space
#ifdef ASSERT
              _new2old_map.map(m->_idx, n);
#endif
              // Def-Use edges will be added incrementally as Uses
              // of this node are matched.
              assert(m->outcnt() == 0, "no Uses of this clone yet");
            }
          }

          set_new_node(n, m);       // Map old to new
          if (_old_node_note_array != NULL) {
            Node_Notes* nn = C->locate_node_notes(_old_node_note_array,
                                                  n->_idx);
            C->set_node_notes_at(m->_idx, nn);
          }
          debug_only(match_alias_type(C, n, m));
        }
        n = m;    // n is now a new-space node
        mstack.set_node(n);
      }

      // New space!
      if (_visited.test_set(n->_idx)) continue; // while(mstack.is_nonempty())

      int i;
      // Put precedence edges on stack first (match them last).
      for (i = oldn->req(); (uint)i < oldn->len(); i++) {
        Node *m = oldn->in(i);
        if (m == NULL) break;
        // set -1 to call add_prec() instead of set_req() during Step1
        mstack.push(m, Visit, n, -1);
      }

      // For constant debug info, I'd rather have unmatched constants.
      int cnt = n->req();
      JVMState* jvms = n->jvms();
      int debug_cnt = jvms ? jvms->debug_start() : cnt;

      // Now do only debug info.  Clone constants rather than matching.
      // Constants are represented directly in the debug info without
      // the need for executable machine instructions.
      // Monitor boxes are also represented directly.
      for (i = cnt - 1; i >= debug_cnt; --i) { // For all debug inputs do
        Node *m = n->in(i);          // Get input
        int op = m->Opcode();
        assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites");
        if( op == Op_ConI || op == Op_ConP || op == Op_ConN || op == Op_ConNKlass ||
            op == Op_ConF || op == Op_ConD || op == Op_ConL
            // || op == Op_BoxLock  // %%%% enable this and remove (+++) in chaitin.cpp
            ) {
          m = m->clone();
#ifdef ASSERT
          _new2old_map.map(m->_idx, n);
#endif
          mstack.push(m, Post_Visit, n, i); // Don't need to visit
          mstack.push(m->in(0), Visit, m, 0);
        } else {
          mstack.push(m, Visit, n, i);
        }
      }

      // And now walk his children, and convert his inputs to new-space.
      for( ; i >= 0; --i ) { // For all normal inputs do
        Node *m = n->in(i);  // Get input
        if(m != NULL)
          mstack.push(m, Visit, n, i);
      }

    }
    else if (nstate == Post_Visit) {
      // Set xformed input
      Node *p = mstack.parent();
      if (p != NULL) { // root doesn't have parent
        int i = (int)mstack.index();
        if (i >= 0)
          p->set_req(i, n); // required input
        else if (i == -1)
          p->add_prec(n);   // precedence input
        else
          ShouldNotReachHere();
      }
      mstack.pop(); // remove processed node from stack
    }
    else {
      ShouldNotReachHere();
    }
  } // while (mstack.is_nonempty())
  return n; // Return new-space Node
}
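// Note: the Visit/Post_Visit protocol above is an iterative post-order walk;
// each child is rewritten into new-space before its parent's Post_Visit step
// patches the parent's edge (set_req for indices >= 0, add_prec for -1).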

//------------------------------warp_outgoing_stk_arg------------------------
OptoReg::Name Matcher::warp_outgoing_stk_arg( VMReg reg, OptoReg::Name begin_out_arg_area, OptoReg::Name &out_arg_limit_per_call ) {
  // Convert outgoing argument location to a pre-biased stack offset
  if (reg->is_stack()) {
    OptoReg::Name warped = reg->reg2stack();
    // Adjust the stack slot offset to be the register number used
    // by the allocator.
    warped = OptoReg::add(begin_out_arg_area, warped);
    // Keep track of the largest numbered stack slot used for an arg.
    // Largest used slot per call-site indicates the amount of stack
    // that is killed by the call.
    if( warped >= out_arg_limit_per_call )
      out_arg_limit_per_call = OptoReg::add(warped,1);
    if (!RegMask::can_represent_arg(warped)) {
      C->record_method_not_compilable_all_tiers("unsupported calling sequence");
      return OptoReg::Bad;
    }
    return warped;
  }
  return OptoReg::as_OptoReg(reg);
}
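// Worked example (hypothetical values): an outgoing argument at VMReg stack
// slot 2, with begin_out_arg_area at slot 10, warps to OptoReg slot 12 and
// bumps out_arg_limit_per_call to at least 13.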


//------------------------------match_sfpt-------------------------------------
// Helper function to match call instructions.  Calls match special.
// They match alone with no children.  Their children, the incoming
// arguments, match normally.
MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
  MachSafePointNode *msfpt = NULL;
  MachCallNode      *mcall = NULL;
  uint               cnt;
  // Split out case for SafePoint vs Call
  CallNode *call;
  const TypeTuple *domain;
  ciMethod*        method = NULL;
  bool             is_method_handle_invoke = false;  // for special kill effects
  if( sfpt->is_Call() ) {
    call = sfpt->as_Call();
    domain = call->tf()->domain();
    cnt = domain->cnt();

    // Match just the call, nothing else
    MachNode *m = match_tree(call);
    if (C->failing())  return NULL;
    if( m == NULL ) { Matcher::soft_match_failure(); return NULL; }

    // Copy data from the Ideal SafePoint to the machine version
    mcall = m->as_MachCall();

    mcall->set_tf(         call->tf());
    mcall->set_entry_point(call->entry_point());
    mcall->set_cnt(        call->cnt());

    if( mcall->is_MachCallJava() ) {
      MachCallJavaNode *mcall_java  = mcall->as_MachCallJava();
      const CallJavaNode *call_java =  call->as_CallJava();
      method = call_java->method();
      mcall_java->_method = method;
      mcall_java->_bci = call_java->_bci;
      mcall_java->_optimized_virtual = call_java->is_optimized_virtual();
      is_method_handle_invoke = call_java->is_method_handle_invoke();
      mcall_java->_method_handle_invoke = is_method_handle_invoke;
      if (is_method_handle_invoke) {
        C->set_has_method_handle_invokes(true);
      }
      if( mcall_java->is_MachCallStaticJava() )
        mcall_java->as_MachCallStaticJava()->_name =
         call_java->as_CallStaticJava()->_name;
      if( mcall_java->is_MachCallDynamicJava() )
        mcall_java->as_MachCallDynamicJava()->_vtable_index =
         call_java->as_CallDynamicJava()->_vtable_index;
    }
    else if( mcall->is_MachCallRuntime() ) {
      mcall->as_MachCallRuntime()->_name = call->as_CallRuntime()->_name;
    }
    msfpt = mcall;
  }
  // This is a non-call safepoint
  else {
    call = NULL;
    domain = NULL;
    MachNode *mn = match_tree(sfpt);
    if (C->failing())  return NULL;
    msfpt = mn->as_MachSafePoint();
    cnt = TypeFunc::Parms;
  }

  // Advertise the correct memory effects (for anti-dependence computation).
  msfpt->set_adr_type(sfpt->adr_type());

  // Allocate a private array of RegMasks.  These RegMasks are not shared.
  msfpt->_in_rms = NEW_RESOURCE_ARRAY( RegMask, cnt );
  // Empty them all.
  memset( msfpt->_in_rms, 0, sizeof(RegMask)*cnt );

  // Do all the pre-defined non-Empty register masks
  msfpt->_in_rms[TypeFunc::ReturnAdr] = _return_addr_mask;
  msfpt->_in_rms[TypeFunc::FramePtr ] = c_frame_ptr_mask;
  // First place an outgoing argument can possibly be put.
1210   OptoReg::Name begin_out_arg_area = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
1211   assert( is_even(begin_out_arg_area), "" );
1212   // Compute max outgoing register number per call site.
1213   OptoReg::Name out_arg_limit_per_call = begin_out_arg_area;
1214   // Calls to C may hammer extra stack slots above and beyond any arguments.
1215   // These are usually backing store for register arguments for varargs.
1216   if( call != NULL && call->is_CallRuntime() )
1217     out_arg_limit_per_call = OptoReg::add(out_arg_limit_per_call,C->varargs_C_out_slots_killed());
1218 
1219 
1220   // Do the normal argument list (parameters) register masks
1221   int argcnt = cnt - TypeFunc::Parms;
1222   if( argcnt > 0 ) {          // Skip it all if we have no args
1223     BasicType *sig_bt  = NEW_RESOURCE_ARRAY( BasicType, argcnt );
1224     VMRegPair *parm_regs = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
1225     int i;
1226     for( i = 0; i < argcnt; i++ ) {
1227       sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
1228     }
1229     // V-call to pick proper calling convention
1230     call->calling_convention( sig_bt, parm_regs, argcnt );
1231 
1232 #ifdef ASSERT
1233     // Sanity check users' calling convention.  Really handy during
1234     // the initial porting effort.  Fairly expensive otherwise.
1235     { for (int i = 0; i<argcnt; i++) {
1236       if( !parm_regs[i].first()->is_valid() &&
1237           !parm_regs[i].second()->is_valid() ) continue;
1238       VMReg reg1 = parm_regs[i].first();
1239       VMReg reg2 = parm_regs[i].second();
1240       for (int j = 0; j < i; j++) {
1241         if( !parm_regs[j].first()->is_valid() &&
1242             !parm_regs[j].second()->is_valid() ) continue;
1243         VMReg reg3 = parm_regs[j].first();
1244         VMReg reg4 = parm_regs[j].second();
1245         if( !reg1->is_valid() ) {
1246           assert( !reg2->is_valid(), "valid halvsies" );
1247         } else if( !reg3->is_valid() ) {
1248           assert( !reg4->is_valid(), "valid halvsies" );
1249         } else {
1250           assert( reg1 != reg2, "calling conv. must produce distinct regs");
1251           assert( reg1 != reg3, "calling conv. must produce distinct regs");
1252           assert( reg1 != reg4, "calling conv. must produce distinct regs");
1253           assert( reg2 != reg3, "calling conv. must produce distinct regs");
1254           assert( reg2 != reg4 || !reg2->is_valid(), "calling conv. must produce distinct regs");
1255           assert( reg3 != reg4, "calling conv. must produce distinct regs");
1256         }
1257       }
1258     }
1259     }
1260 #endif
1261 
    // Visit each argument.  Compute its outgoing register mask.
    // The calling convention can return two slots per argument (e.g., a
    // long or double in a register pair), so a mask may have 2 bits set.
    // Compute the max over all outgoing arguments, both per call-site
    // and over the entire method.
1266     for( i = 0; i < argcnt; i++ ) {
1267       // Address of incoming argument mask to fill in
1268       RegMask *rm = &mcall->_in_rms[i+TypeFunc::Parms];
1269       if( !parm_regs[i].first()->is_valid() &&
1270           !parm_regs[i].second()->is_valid() ) {
1271         continue;               // Avoid Halves
1272       }
1273       // Grab first register, adjust stack slots and insert in mask.
1274       OptoReg::Name reg1 = warp_outgoing_stk_arg(parm_regs[i].first(), begin_out_arg_area, out_arg_limit_per_call );
1275       if (OptoReg::is_valid(reg1))
1276         rm->Insert( reg1 );
1277       // Grab second register (if any), adjust stack slots and insert in mask.
1278       OptoReg::Name reg2 = warp_outgoing_stk_arg(parm_regs[i].second(), begin_out_arg_area, out_arg_limit_per_call );
1279       if (OptoReg::is_valid(reg2))
1280         rm->Insert( reg2 );
1281     } // End of for all arguments
1282 
1283     // Compute number of stack slots needed to restore stack in case of
1284     // Pascal-style argument popping.
1285     mcall->_argsize = out_arg_limit_per_call - begin_out_arg_area;
1286   }
1287 
1288   // Compute the max stack slot killed by any call.  These will not be
1289   // available for debug info, and will be used to adjust FIRST_STACK_mask
1290   // after all call sites have been visited.
1291   if( _out_arg_limit < out_arg_limit_per_call)
1292     _out_arg_limit = out_arg_limit_per_call;
1293 
1294   if (mcall) {
1295     // Kill the outgoing argument area, including any non-argument holes and
1296     // any legacy C-killed slots.  Use Fat-Projections to do the killing.
1297     // Since the max-per-method covers the max-per-call-site and debug info
1298     // is excluded on the max-per-method basis, debug info cannot land in
1299     // this killed area.
1300     uint r_cnt = mcall->tf()->range()->cnt();
1301     MachProjNode *proj = new MachProjNode( mcall, r_cnt+10000, RegMask::Empty, MachProjNode::fat_proj );
1302     if (!RegMask::can_represent_arg(OptoReg::Name(out_arg_limit_per_call-1))) {
1303       C->record_method_not_compilable_all_tiers("unsupported outgoing calling sequence");
1304     } else {
1305       for (int i = begin_out_arg_area; i < out_arg_limit_per_call; i++)
1306         proj->_rout.Insert(OptoReg::Name(i));
1307     }
1308     if (proj->_rout.is_NotEmpty()) {
1309       push_projection(proj);
1310     }
1311   }
1312   // Transfer the safepoint information from the call to the mcall
1313   // Move the JVMState list
1314   msfpt->set_jvms(sfpt->jvms());
1315   for (JVMState* jvms = msfpt->jvms(); jvms; jvms = jvms->caller()) {
1316     jvms->set_map(sfpt);
1317   }
1318 
1319   // Debug inputs begin just after the last incoming parameter
1320   assert((mcall == NULL) || (mcall->jvms() == NULL) ||
1321          (mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain()->cnt()), "");
1322 
1323   // Move the OopMap
1324   msfpt->_oop_map = sfpt->_oop_map;
1325 
1326   // Add additional edges.
1327   if (msfpt->mach_constant_base_node_input() != (uint)-1 && !msfpt->is_MachCallLeaf()) {
    // For these calls we cannot add MachConstantBase in expand(), as the
    // ins are not complete at that point.
1330     msfpt->ins_req(msfpt->mach_constant_base_node_input(), C->mach_constant_base_node());
1331     if (msfpt->jvms() &&
1332         msfpt->mach_constant_base_node_input() <= msfpt->jvms()->debug_start() + msfpt->_jvmadj) {
1333       // We added an edge before jvms, so we must adapt the position of the ins.
1334       msfpt->jvms()->adapt_position(+1);
1335     }
1336   }
1337 
1338   // Registers killed by the call are set in the local scheduling pass
1339   // of Global Code Motion.
1340   return msfpt;
1341 }
1342 
1343 //---------------------------match_tree----------------------------------------
// Match an Ideal Node DAG - turn it into a tree; Label & Reduce.  Used as part
// of the wholesale conversion from Ideal to Mach Nodes.  Also used for
// making GotoNodes while building the CFG and in init_spill_mask() to identify
// a Load's result RegMask for memoization in idealreg2regmask[].
1348 MachNode *Matcher::match_tree( const Node *n ) {
1349   assert( n->Opcode() != Op_Phi, "cannot match" );
1350   assert( !n->is_block_start(), "cannot match" );
1351   // Set the mark for all locally allocated State objects.
1352   // When this call returns, the _states_arena arena will be reset
1353   // freeing all State objects.
1354   ResourceMark rm( &_states_arena );
1355 
1356   LabelRootDepth = 0;
1357 
1358   // StoreNodes require their Memory input to match any LoadNodes
1359   Node *mem = n->is_Store() ? n->in(MemNode::Memory) : (Node*)1 ;
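  // (Node*)1 is a sentinel meaning "no Memory constraint yet"; during
  // reduction it is replaced by the real memory state of a subsumed Load,
  // and ReduceInst only wires a Memory edge when mem != (Node*)1.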
1360 #ifdef ASSERT
1361   Node* save_mem_node = _mem_node;
1362   _mem_node = n->is_Store() ? (Node*)n : NULL;
1363 #endif
1364   // State object for root node of match tree
1365   // Allocate it on _states_arena - stack allocation can cause stack overflow.
1366   State *s = new (&_states_arena) State;
1367   s->_kids[0] = NULL;
1368   s->_kids[1] = NULL;
1369   s->_leaf = (Node*)n;
1370   // Label the input tree, allocating labels from top-level arena
1371   Label_Root( n, s, n->in(0), mem );
1372   if (C->failing())  return NULL;
1373 
1374   // The minimum cost match for the whole tree is found at the root State
1375   uint mincost = max_juint;
1376   uint cost = max_juint;
1377   uint i;
1378   for( i = 0; i < NUM_OPERANDS; i++ ) {
1379     if( s->valid(i) &&                // valid entry and
1380         s->_cost[i] < cost &&         // low cost and
1381         s->_rule[i] >= NUM_OPERANDS ) // not an operand
1382       cost = s->_cost[mincost=i];
1383   }
1384   if (mincost == max_juint) {
1385 #ifndef PRODUCT
1386     tty->print("No matching rule for:");
1387     s->dump();
1388 #endif
1389     Matcher::soft_match_failure();
1390     return NULL;
1391   }
1392   // Reduce input tree based upon the state labels to machine Nodes
1393   MachNode *m = ReduceInst( s, s->_rule[mincost], mem );
1394 #ifdef ASSERT
1395   _old2new_map.map(n->_idx, m);
1396   _new2old_map.map(m->_idx, (Node*)n);
1397 #endif
1398 
1399   // Add any Matcher-ignored edges
1400   uint cnt = n->req();
1401   uint start = 1;
1402   if( mem != (Node*)1 ) start = MemNode::Memory+1;
1403   if( n->is_AddP() ) {
1404     assert( mem == (Node*)1, "" );
1405     start = AddPNode::Base+1;
1406   }
1407   for( i = start; i < cnt; i++ ) {
1408     if( !n->match_edge(i) ) {
1409       if( i < m->req() )
1410         m->ins_req( i, n->in(i) );
1411       else
1412         m->add_req( n->in(i) );
1413     }
1414   }
1415 
1416   debug_only( _mem_node = save_mem_node; )
1417   return m;
1418 }
1419 
1420 
1421 //------------------------------match_into_reg---------------------------------
1422 // Choose to either match this Node in a register or part of the current
1423 // match tree.  Return true for requiring a register and false for matching
1424 // as part of the current match tree.
1425 static bool match_into_reg( const Node *n, Node *m, Node *control, int i, bool shared ) {
1426 
1427   const Type *t = m->bottom_type();
1428 
1429   if (t->singleton()) {
1430     // Never force constants into registers.  Allow them to match as
1431     // constants or registers.  Copies of the same value will share
1432     // the same register.  See find_shared_node.
1433     return false;
1434   } else {                      // Not a constant
1435     // Stop recursion if they have different Controls.
1436     Node* m_control = m->in(0);
    // The control of the load's memory can post-dominate the load's
    // control, so use it too: the load can't float above its memory.
1439     Node* mem_control = (m->is_Load()) ? m->in(MemNode::Memory)->in(0) : NULL;
1440     if (control && m_control && control != m_control && control != mem_control) {
1441 
1442       // Actually, we can live with the most conservative control we
1443       // find, if it post-dominates the others.  This allows us to
1444       // pick up load/op/store trees where the load can float a little
1445       // above the store.
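      // E.g., for a load/op/store tree we walk up from 'control' at most
      // max_scan steps; if we reach the load's control (m_control) or its
      // memory's control (mem_control), 'control' post-dominates it and
      // the nodes can stay in one match tree.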
1446       Node *x = control;
1447       const uint max_scan = 6;  // Arbitrary scan cutoff
1448       uint j;
1449       for (j=0; j<max_scan; j++) {
1450         if (x->is_Region())     // Bail out at merge points
1451           return true;
1452         x = x->in(0);
1453         if (x == m_control)     // Does 'control' post-dominate
1454           break;                // m->in(0)?  If so, we can use it
1455         if (x == mem_control)   // Does 'control' post-dominate
1456           break;                // mem_control?  If so, we can use it
1457       }
1458       if (j == max_scan)        // No post-domination before scan end?
1459         return true;            // Then break the match tree up
1460     }
1461     if ((m->is_DecodeN() && Matcher::narrow_oop_use_complex_address()) ||
1462         (m->is_DecodeNKlass() && Matcher::narrow_klass_use_complex_address())) {
1463       // These are commonly used in address expressions and can
1464       // efficiently fold into them on X64 in some cases.
1465       return false;
1466     }
1467   }
1468 
1469   // Not forceable cloning.  If shared, put it into a register.
1470   return shared;
1471 }
1472 
1473 
1474 //------------------------------Instruction Selection--------------------------
1475 // Label method walks a "tree" of nodes, using the ADLC generated DFA to match
1476 // ideal nodes to machine instructions.  Trees are delimited by shared Nodes,
1477 // things the Matcher does not match (e.g., Memory), and things with different
1478 // Controls (hence forced into different blocks).  We pass in the Control
1479 // selected for this entire State tree.
1480 
1481 // The Matcher works on Trees, but an Intel add-to-memory requires a DAG: the
1482 // Store and the Load must have identical Memories (as well as identical
1483 // pointers).  Since the Matcher does not have anything for Memory (and
1484 // does not handle DAGs), I have to match the Memory input myself.  If the
1485 // Tree root is a Store, I require all Loads to have the identical memory.
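// For example, an x86 add-to-memory instruction covers a subgraph like
//   (StoreI mem addr (AddI (LoadI mem addr) val))
// where the StoreI root and the subsumed LoadI must share the same 'mem'.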
1486 Node *Matcher::Label_Root( const Node *n, State *svec, Node *control, const Node *mem){
  // Since Label_Root is a recursive function, it's possible that we might run
  // out of stack space.  See bugs 6272980 & 6227033 for more info.
1489   LabelRootDepth++;
1490   if (LabelRootDepth > MaxLabelRootDepth) {
1491     C->record_method_not_compilable_all_tiers("Out of stack space, increase MaxLabelRootDepth");
1492     return NULL;
1493   }
1494   uint care = 0;                // Edges matcher cares about
1495   uint cnt = n->req();
1496   uint i = 0;
1497 
1498   // Examine children for memory state
1499   // Can only subsume a child into your match-tree if that child's memory state
1500   // is not modified along the path to another input.
1501   // It is unsafe even if the other inputs are separate roots.
1502   Node *input_mem = NULL;
1503   for( i = 1; i < cnt; i++ ) {
1504     if( !n->match_edge(i) ) continue;
1505     Node *m = n->in(i);         // Get ith input
1506     assert( m, "expect non-null children" );
1507     if( m->is_Load() ) {
1508       if( input_mem == NULL ) {
1509         input_mem = m->in(MemNode::Memory);
1510       } else if( input_mem != m->in(MemNode::Memory) ) {
1511         input_mem = NodeSentinel;
1512       }
1513     }
1514   }
1515 
1516   for( i = 1; i < cnt; i++ ){// For my children
1517     if( !n->match_edge(i) ) continue;
1518     Node *m = n->in(i);         // Get ith input
1519     // Allocate states out of a private arena
1520     State *s = new (&_states_arena) State;
1521     svec->_kids[care++] = s;
1522     assert( care <= 2, "binary only for now" );
1523 
1524     // Recursively label the State tree.
1525     s->_kids[0] = NULL;
1526     s->_kids[1] = NULL;
1527     s->_leaf = m;
1528 
    // Check for leaves of the State Tree; things that cannot be a part of
    // the current tree.  If we find one, that value is matched as a
    // register operand.  Otherwise, normal matching is used.
1532     if( match_into_reg(n, m, control, i, is_shared(m)) ||
1533         //
1534         // Stop recursion if this is LoadNode and the root of this tree is a
1535         // StoreNode and the load & store have different memories.
1536         ((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem) ||
1537         // Can NOT include the match of a subtree when its memory state
1538         // is used by any of the other subtrees
1539         (input_mem == NodeSentinel) ) {
1540 #ifndef PRODUCT
1541       // Print when we exclude matching due to different memory states at input-loads
1542       if( PrintOpto && (Verbose && WizardMode) && (input_mem == NodeSentinel)
1543         && !((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem) ) {
1544         tty->print_cr("invalid input_mem");
1545       }
1546 #endif
1547       // Switch to a register-only opcode; this value must be in a register
1548       // and cannot be subsumed as part of a larger instruction.
1549       s->DFA( m->ideal_reg(), m );
1550 
1551     } else {
1552       // If match tree has no control and we do, adopt it for entire tree
1553       if( control == NULL && m->in(0) != NULL && m->req() > 1 )
1554         control = m->in(0);         // Pick up control
1555       // Else match as a normal part of the match tree.
1556       control = Label_Root(m,s,control,mem);
1557       if (C->failing()) return NULL;
1558     }
1559   }
1560 
1561 
1562   // Call DFA to match this node, and return
1563   svec->DFA( n->Opcode(), n );
1564 
1565 #ifdef ASSERT
1566   uint x;
1567   for( x = 0; x < _LAST_MACH_OPER; x++ )
1568     if( svec->valid(x) )
1569       break;
1570 
1571   if (x >= _LAST_MACH_OPER) {
1572     n->dump();
1573     svec->dump();
1574     assert( false, "bad AD file" );
1575   }
1576 #endif
1577   return control;
1578 }
1579 
1580 
1581 // Con nodes reduced using the same rule can share their MachNode
1582 // which reduces the number of copies of a constant in the final
1583 // program.  The register allocator is free to split uses later to
1584 // split live ranges.
1585 MachNode* Matcher::find_shared_node(Node* leaf, uint rule) {
1586   if (!leaf->is_Con() && !leaf->is_DecodeNarrowPtr()) return NULL;
1587 
1588   // See if this Con has already been reduced using this rule.
1589   if (_shared_nodes.Size() <= leaf->_idx) return NULL;
1590   MachNode* last = (MachNode*)_shared_nodes.at(leaf->_idx);
1591   if (last != NULL && rule == last->rule()) {
1592     // Don't expect control change for DecodeN
1593     if (leaf->is_DecodeNarrowPtr())
1594       return last;
1595     // Get the new space root.
1596     Node* xroot = new_node(C->root());
1597     if (xroot == NULL) {
      // This shouldn't happen given the order of matching.
1599       return NULL;
1600     }
1601 
1602     // Shared constants need to have their control be root so they
1603     // can be scheduled properly.
1604     Node* control = last->in(0);
1605     if (control != xroot) {
1606       if (control == NULL || control == C->root()) {
1607         last->set_req(0, xroot);
1608       } else {
1609         assert(false, "unexpected control");
1610         return NULL;
1611       }
1612     }
1613     return last;
1614   }
1615   return NULL;
1616 }
1617 
1618 
1619 //------------------------------ReduceInst-------------------------------------
1620 // Reduce a State tree (with given Control) into a tree of MachNodes.
// This routine (and its cohort ReduceOper) converts Ideal Nodes into
1622 // complicated machine Nodes.  Each MachNode covers some tree of Ideal Nodes.
1623 // Each MachNode has a number of complicated MachOper operands; each
1624 // MachOper also covers a further tree of Ideal Nodes.
1625 
1626 // The root of the Ideal match tree is always an instruction, so we enter
1627 // the recursion here.  After building the MachNode, we need to recurse
1628 // the tree checking for these cases:
1629 // (1) Child is an instruction -
1630 //     Build the instruction (recursively), add it as an edge.
1631 //     Build a simple operand (register) to hold the result of the instruction.
1632 // (2) Child is an interior part of an instruction -
1633 //     Skip over it (do nothing)
// (3) Child is the start of an operand -
1635 //     Build the operand, place it inside the instruction
1636 //     Call ReduceOper.
1637 MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
1638   assert( rule >= NUM_OPERANDS, "called with operand rule" );
1639 
1640   MachNode* shared_node = find_shared_node(s->_leaf, rule);
1641   if (shared_node != NULL) {
1642     return shared_node;
1643   }
1644 
1645   // Build the object to represent this state & prepare for recursive calls
1646   MachNode *mach = s->MachNodeGenerator( rule, C );
1647   mach->_opnds[0] = s->MachOperGenerator( _reduceOp[rule], C );
1648   assert( mach->_opnds[0] != NULL, "Missing result operand" );
1649   Node *leaf = s->_leaf;
1650   // Check for instruction or instruction chain rule
1651   if( rule >= _END_INST_CHAIN_RULE || rule < _BEGIN_INST_CHAIN_RULE ) {
1652     assert(C->node_arena()->contains(s->_leaf) || !has_new_node(s->_leaf),
1653            "duplicating node that's already been matched");
1654     // Instruction
1655     mach->add_req( leaf->in(0) ); // Set initial control
1656     // Reduce interior of complex instruction
1657     ReduceInst_Interior( s, rule, mem, mach, 1 );
1658   } else {
1659     // Instruction chain rules are data-dependent on their inputs
1660     mach->add_req(0);             // Set initial control to none
1661     ReduceInst_Chain_Rule( s, rule, mem, mach );
1662   }
1663 
1664   // If a Memory was used, insert a Memory edge
1665   if( mem != (Node*)1 ) {
1666     mach->ins_req(MemNode::Memory,mem);
1667 #ifdef ASSERT
1668     // Verify adr type after matching memory operation
1669     const MachOper* oper = mach->memory_operand();
1670     if (oper != NULL && oper != (MachOper*)-1) {
1671       // It has a unique memory operand.  Find corresponding ideal mem node.
1672       Node* m = NULL;
1673       if (leaf->is_Mem()) {
1674         m = leaf;
1675       } else {
1676         m = _mem_node;
1677         assert(m != NULL && m->is_Mem(), "expecting memory node");
1678       }
1679       const Type* mach_at = mach->adr_type();
      // A DecodeN node consumed by an address may have a different type
      // than its input. Don't compare types in that case.
      if (m->adr_type() != mach_at &&
          (m->in(MemNode::Address)->is_DecodeNarrowPtr() ||
           (m->in(MemNode::Address)->is_AddP() &&
            m->in(MemNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr()) ||
           (m->in(MemNode::Address)->is_AddP() &&
            m->in(MemNode::Address)->in(AddPNode::Address)->is_AddP() &&
            m->in(MemNode::Address)->in(AddPNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr()))) {
1689         mach_at = m->adr_type();
1690       }
1691       if (m->adr_type() != mach_at) {
1692         m->dump();
1693         tty->print_cr("mach:");
1694         mach->dump(1);
1695       }
1696       assert(m->adr_type() == mach_at, "matcher should not change adr type");
1697     }
1698 #endif
1699   }
1700 
1701   // If the _leaf is an AddP, insert the base edge
1702   if (leaf->is_AddP()) {
1703     mach->ins_req(AddPNode::Base,leaf->in(AddPNode::Base));
1704   }
1705 
1706   uint number_of_projections_prior = number_of_projections();
1707 
1708   // Perform any 1-to-many expansions required
1709   MachNode *ex = mach->Expand(s, _projection_list, mem);
1710   if (ex != mach) {
1711     assert(ex->ideal_reg() == mach->ideal_reg(), "ideal types should match");
1712     if( ex->in(1)->is_Con() )
1713       ex->in(1)->set_req(0, C->root());
1714     // Remove old node from the graph
1715     for( uint i=0; i<mach->req(); i++ ) {
1716       mach->set_req(i,NULL);
1717     }
1718 #ifdef ASSERT
1719     _new2old_map.map(ex->_idx, s->_leaf);
1720 #endif
1721   }
1722 
1723   // PhaseChaitin::fixup_spills will sometimes generate spill code
1724   // via the matcher.  By the time, nodes have been wired into the CFG,
1725   // and any further nodes generated by expand rules will be left hanging
1726   // in space, and will not get emitted as output code.  Catch this.
1727   // Also, catch any new register allocation constraints ("projections")
1728   // generated belatedly during spill code generation.
1729   if (_allocation_started) {
1730     guarantee(ex == mach, "no expand rules during spill generation");
1731     guarantee(number_of_projections_prior == number_of_projections(), "no allocation during spill generation");
1732   }
1733 
1734   if (leaf->is_Con() || leaf->is_DecodeNarrowPtr()) {
1735     // Record the con for sharing
1736     _shared_nodes.map(leaf->_idx, ex);
1737   }
1738 
1739   return ex;
1740 }
1741 
1742 void Matcher::ReduceInst_Chain_Rule( State *s, int rule, Node *&mem, MachNode *mach ) {
1743   // 'op' is what I am expecting to receive
1744   int op = _leftOp[rule];
  // Operand type to catch the child's result.
  // This is what my child will give me.
1747   int opnd_class_instance = s->_rule[op];
1748   // Choose between operand class or not.
1749   // This is what I will receive.
1750   int catch_op = (FIRST_OPERAND_CLASS <= op && op < NUM_OPERANDS) ? opnd_class_instance : op;
1751   // New rule for child.  Chase operand classes to get the actual rule.
1752   int newrule = s->_rule[catch_op];
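  // For example, if the left operand of the rule is an ADL operand class
  // (such as 'memory'), catch_op resolves to the concrete operand the
  // child actually matched, and newrule is that operand's reduction rule.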
1753 
1754   if( newrule < NUM_OPERANDS ) {
1755     // Chain from operand or operand class, may be output of shared node
1756     assert( 0 <= opnd_class_instance && opnd_class_instance < NUM_OPERANDS,
1757             "Bad AD file: Instruction chain rule must chain from operand");
1758     // Insert operand into array of operands for this instruction
1759     mach->_opnds[1] = s->MachOperGenerator( opnd_class_instance, C );
1760 
1761     ReduceOper( s, newrule, mem, mach );
1762   } else {
1763     // Chain from the result of an instruction
1764     assert( newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand");
1765     mach->_opnds[1] = s->MachOperGenerator( _reduceOp[catch_op], C );
1766     Node *mem1 = (Node*)1;
1767     debug_only(Node *save_mem_node = _mem_node;)
1768     mach->add_req( ReduceInst(s, newrule, mem1) );
1769     debug_only(_mem_node = save_mem_node;)
1770   }
1771   return;
1772 }
1773 
1774 
1775 uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds ) {
1776   if( s->_leaf->is_Load() ) {
1777     Node *mem2 = s->_leaf->in(MemNode::Memory);
1778     assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" );
1779     debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;)
1780     mem = mem2;
1781   }
1782   if( s->_leaf->in(0) != NULL && s->_leaf->req() > 1) {
1783     if( mach->in(0) == NULL )
1784       mach->set_req(0, s->_leaf->in(0));
1785   }
1786 
1787   // Now recursively walk the state tree & add operand list.
1788   for( uint i=0; i<2; i++ ) {   // binary tree
1789     State *newstate = s->_kids[i];
1790     if( newstate == NULL ) break;      // Might only have 1 child
1791     // 'op' is what I am expecting to receive
1792     int op;
1793     if( i == 0 ) {
1794       op = _leftOp[rule];
1795     } else {
1796       op = _rightOp[rule];
1797     }
    // Operand type to catch the child's result.
    // This is what my child will give me.
1800     int opnd_class_instance = newstate->_rule[op];
1801     // Choose between operand class or not.
1802     // This is what I will receive.
1803     int catch_op = (op >= FIRST_OPERAND_CLASS && op < NUM_OPERANDS) ? opnd_class_instance : op;
1804     // New rule for child.  Chase operand classes to get the actual rule.
1805     int newrule = newstate->_rule[catch_op];
1806 
1807     if( newrule < NUM_OPERANDS ) { // Operand/operandClass or internalOp/instruction?
1808       // Operand/operandClass
1809       // Insert operand into array of operands for this instruction
1810       mach->_opnds[num_opnds++] = newstate->MachOperGenerator( opnd_class_instance, C );
1811       ReduceOper( newstate, newrule, mem, mach );
1812 
1813     } else {                    // Child is internal operand or new instruction
1814       if( newrule < _LAST_MACH_OPER ) { // internal operand or instruction?
1815         // internal operand --> call ReduceInst_Interior
1816         // Interior of complex instruction.  Do nothing but recurse.
1817         num_opnds = ReduceInst_Interior( newstate, newrule, mem, mach, num_opnds );
1818       } else {
1819         // instruction --> call build operand(  ) to catch result
1820         //             --> ReduceInst( newrule )
1821         mach->_opnds[num_opnds++] = s->MachOperGenerator( _reduceOp[catch_op], C );
1822         Node *mem1 = (Node*)1;
1823         debug_only(Node *save_mem_node = _mem_node;)
1824         mach->add_req( ReduceInst( newstate, newrule, mem1 ) );
1825         debug_only(_mem_node = save_mem_node;)
1826       }
1827     }
1828     assert( mach->_opnds[num_opnds-1], "" );
1829   }
1830   return num_opnds;
1831 }
1832 
1833 // This routine walks the interior of possible complex operands.
1834 // At each point we check our children in the match tree:
1835 // (1) No children -
1836 //     We are a leaf; add _leaf field as an input to the MachNode
1837 // (2) Child is an internal operand -
1838 //     Skip over it ( do nothing )
1839 // (3) Child is an instruction -
//     Call ReduceInst recursively and add the resulting
//     instruction as an input to the MachNode
1842 void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
1843   assert( rule < _LAST_MACH_OPER, "called with operand rule" );
1844   State *kid = s->_kids[0];
1845   assert( kid == NULL || s->_leaf->in(0) == NULL, "internal operands have no control" );
1846 
1847   // Leaf?  And not subsumed?
1848   if( kid == NULL && !_swallowed[rule] ) {
1849     mach->add_req( s->_leaf );  // Add leaf pointer
1850     return;                     // Bail out
1851   }
1852 
1853   if( s->_leaf->is_Load() ) {
1854     assert( mem == (Node*)1, "multiple Memories being matched at once?" );
1855     mem = s->_leaf->in(MemNode::Memory);
1856     debug_only(_mem_node = s->_leaf;)
1857   }
1858   if( s->_leaf->in(0) && s->_leaf->req() > 1) {
1859     if( !mach->in(0) )
1860       mach->set_req(0,s->_leaf->in(0));
1861     else {
1862       assert( s->_leaf->in(0) == mach->in(0), "same instruction, differing controls?" );
1863     }
1864   }
1865 
1866   for( uint i=0; kid != NULL && i<2; kid = s->_kids[1], i++ ) {   // binary tree
1867     int newrule;
1868     if( i == 0)
1869       newrule = kid->_rule[_leftOp[rule]];
1870     else
1871       newrule = kid->_rule[_rightOp[rule]];
1872 
1873     if( newrule < _LAST_MACH_OPER ) { // Operand or instruction?
1874       // Internal operand; recurse but do nothing else
1875       ReduceOper( kid, newrule, mem, mach );
1876 
1877     } else {                    // Child is a new instruction
1878       // Reduce the instruction, and add a direct pointer from this
1879       // machine instruction to the newly reduced one.
1880       Node *mem1 = (Node*)1;
1881       debug_only(Node *save_mem_node = _mem_node;)
1882       mach->add_req( ReduceInst( kid, newrule, mem1 ) );
1883       debug_only(_mem_node = save_mem_node;)
1884     }
1885   }
1886 }
1887 
1888 
1889 // -------------------------------------------------------------------------
1890 // Java-Java calling convention
1891 // (what you use when Java calls Java)
1892 
1893 //------------------------------find_receiver----------------------------------
1894 // For a given signature, return the OptoReg for parameter 0.
1895 OptoReg::Name Matcher::find_receiver( bool is_outgoing ) {
1896   VMRegPair regs;
1897   BasicType sig_bt = T_OBJECT;
1898   calling_convention(&sig_bt, &regs, 1, is_outgoing);
1899   // Return argument 0 register.  In the LP64 build pointers
1900   // take 2 registers, but the VM wants only the 'main' name.
1901   return OptoReg::as_OptoReg(regs.first());
1902 }
1903 
// This function identifies sub-graphs in which a 'load' node is
// input to two different nodes and can be matched with BMI
// instructions like blsi, blsr, etc.
// For example, b = -a[i] & a[i] can be matched to blsi r32, m32.
1908 // The graph is (AndL (SubL Con0 LoadL*) LoadL*), where LoadL*
1909 // refers to the same node.
1910 #ifdef X86
1911 // Match the generic fused operations pattern (op1 (op2 Con{ConType} mop) mop)
1912 // This is a temporary solution until we make DAGs expressible in ADL.
1913 template<typename ConType>
1914 class FusedPatternMatcher {
1915   Node* _op1_node;
1916   Node* _mop_node;
1917   int _con_op;
1918 
1919   static int match_next(Node* n, int next_op, int next_op_idx) {
1920     if (n->in(1) == NULL || n->in(2) == NULL) {
1921       return -1;
1922     }
1923 
1924     if (next_op_idx == -1) { // n is commutative, try rotations
1925       if (n->in(1)->Opcode() == next_op) {
1926         return 1;
1927       } else if (n->in(2)->Opcode() == next_op) {
1928         return 2;
1929       }
1930     } else {
1931       assert(next_op_idx > 0 && next_op_idx <= 2, "Bad argument index");
1932       if (n->in(next_op_idx)->Opcode() == next_op) {
1933         return next_op_idx;
1934       }
1935     }
1936     return -1;
1937   }
1938 public:
1939   FusedPatternMatcher(Node* op1_node, Node *mop_node, int con_op) :
1940     _op1_node(op1_node), _mop_node(mop_node), _con_op(con_op) { }
1941 
1942   bool match(int op1, int op1_op2_idx,  // op1 and the index of the op1->op2 edge, -1 if op1 is commutative
1943              int op2, int op2_con_idx,  // op2 and the index of the op2->con edge, -1 if op2 is commutative
1944              typename ConType::NativeType con_value) {
1945     if (_op1_node->Opcode() != op1) {
1946       return false;
1947     }
1948     if (_mop_node->outcnt() > 2) {
1949       return false;
1950     }
1951     op1_op2_idx = match_next(_op1_node, op2, op1_op2_idx);
1952     if (op1_op2_idx == -1) {
1953       return false;
1954     }
1955     // Memory operation must be the other edge
1956     int op1_mop_idx = (op1_op2_idx & 1) + 1;
1957 
1958     // Check that the mop node is really what we want
1959     if (_op1_node->in(op1_mop_idx) == _mop_node) {
1960       Node *op2_node = _op1_node->in(op1_op2_idx);
1961       if (op2_node->outcnt() > 1) {
1962         return false;
1963       }
1964       assert(op2_node->Opcode() == op2, "Should be");
1965       op2_con_idx = match_next(op2_node, _con_op, op2_con_idx);
1966       if (op2_con_idx == -1) {
1967         return false;
1968       }
1969       // Memory operation must be the other edge
1970       int op2_mop_idx = (op2_con_idx & 1) + 1;
1971       // Check that the memory operation is the same node
1972       if (op2_node->in(op2_mop_idx) == _mop_node) {
1973         // Now check the constant
1974         const Type* con_type = op2_node->in(op2_con_idx)->bottom_type();
1975         if (con_type != Type::TOP && ConType::as_self(con_type)->get_con() == con_value) {
1976           return true;
1977         }
1978       }
1979     }
1980     return false;
1981   }
1982 };
1983 
1984 
1985 bool Matcher::is_bmi_pattern(Node *n, Node *m) {
1986   if (n != NULL && m != NULL) {
1987     if (m->Opcode() == Op_LoadI) {
1988       FusedPatternMatcher<TypeInt> bmii(n, m, Op_ConI);
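      // The three calls below correspond to the BMI1 patterns:
      //   (AndI (SubI 0 load) load)  -> blsi   (isolate lowest set bit)
      //   (AndI (AddI load -1) load) -> blsr   (reset lowest set bit)
      //   (XorI (AddI load -1) load) -> blsmsk (mask up to lowest set bit)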
1989       return bmii.match(Op_AndI, -1, Op_SubI,  1,  0)  ||
1990              bmii.match(Op_AndI, -1, Op_AddI, -1, -1)  ||
1991              bmii.match(Op_XorI, -1, Op_AddI, -1, -1);
1992     } else if (m->Opcode() == Op_LoadL) {
1993       FusedPatternMatcher<TypeLong> bmil(n, m, Op_ConL);
1994       return bmil.match(Op_AndL, -1, Op_SubL,  1,  0) ||
1995              bmil.match(Op_AndL, -1, Op_AddL, -1, -1) ||
1996              bmil.match(Op_XorL, -1, Op_AddL, -1, -1);
1997     }
1998   }
1999   return false;
2000 }
2001 #endif // X86
2002 
2003 // A method-klass-holder may be passed in the inline_cache_reg
2004 // and then expanded into the inline_cache_reg and a method_oop register
2005 //   defined in ad_<arch>.cpp
2006 
2007 
2008 //------------------------------find_shared------------------------------------
2009 // Set bits if Node is shared or otherwise a root
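// The walk is an iterative DFS over an explicit stack: Pre_Visit detects
// sharing, Visit pushes children (special-casing address expressions and
// flag producers), and Post_Visit reshapes trinary nodes into binary
// trees; If and CountedLoopEnd use Alt_Post_Visit to pull the Cmp up
// beside the Bool.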
2010 void Matcher::find_shared( Node *n ) {
2011   // Allocate stack of size C->unique() * 2 to avoid frequent realloc
2012   MStack mstack(C->unique() * 2);
2013   // Mark nodes as address_visited if they are inputs to an address expression
2014   VectorSet address_visited(Thread::current()->resource_area());
2015   mstack.push(n, Visit);     // Don't need to pre-visit root node
2016   while (mstack.is_nonempty()) {
2017     n = mstack.node();       // Leave node on stack
2018     Node_State nstate = mstack.state();
2019     uint nop = n->Opcode();
2020     if (nstate == Pre_Visit) {
2021       if (address_visited.test(n->_idx)) { // Visited in address already?
2022         // Flag as visited and shared now.
2023         set_visited(n);
2024       }
2025       if (is_visited(n)) {   // Visited already?
2026         // Node is shared and has no reason to clone.  Flag it as shared.
2027         // This causes it to match into a register for the sharing.
2028         set_shared(n);       // Flag as shared and
2029         mstack.pop();        // remove node from stack
2030         continue;
2031       }
2032       nstate = Visit; // Not already visited; so visit now
2033     }
2034     if (nstate == Visit) {
2035       mstack.set_state(Post_Visit);
2036       set_visited(n);   // Flag as visited now
2037       bool mem_op = false;
2038 
2039       switch( nop ) {  // Handle some opcodes special
2040       case Op_Phi:             // Treat Phis as shared roots
2041       case Op_Parm:
2042       case Op_Proj:            // All handled specially during matching
2043       case Op_SafePointScalarObject:
2044         set_shared(n);
2045         set_dontcare(n);
2046         break;
2047       case Op_If:
2048       case Op_CountedLoopEnd:
2049         mstack.set_state(Alt_Post_Visit); // Alternative way
2050         // Convert (If (Bool (CmpX A B))) into (If (Bool) (CmpX A B)).  Helps
2051         // with matching cmp/branch in 1 instruction.  The Matcher needs the
2052         // Bool and CmpX side-by-side, because it can only get at constants
2053         // that are at the leaves of Match trees, and the Bool's condition acts
2054         // as a constant here.
2055         mstack.push(n->in(1), Visit);         // Clone the Bool
2056         mstack.push(n->in(0), Pre_Visit);     // Visit control input
2057         continue; // while (mstack.is_nonempty())
2058       case Op_ConvI2D:         // These forms efficiently match with a prior
2059       case Op_ConvI2F:         //   Load but not a following Store
2060         if( n->in(1)->is_Load() &&        // Prior load
2061             n->outcnt() == 1 &&           // Not already shared
2062             n->unique_out()->is_Store() ) // Following store
2063           set_shared(n);       // Force it to be a root
2064         break;
2065       case Op_ReverseBytesI:
2066       case Op_ReverseBytesL:
2067         if( n->in(1)->is_Load() &&        // Prior load
2068             n->outcnt() == 1 )            // Not already shared
2069           set_shared(n);                  // Force it to be a root
2070         break;
      case Op_BoxLock:         // Can't match until we get stack-regs in ADLC
2072       case Op_IfFalse:
2073       case Op_IfTrue:
2074       case Op_MachProj:
2075       case Op_MergeMem:
2076       case Op_Catch:
2077       case Op_CatchProj:
2078       case Op_CProj:
2079       case Op_JumpProj:
2080       case Op_JProj:
2081       case Op_NeverBranch:
2082         set_dontcare(n);
2083         break;
2084       case Op_Jump:
2085         mstack.push(n->in(1), Pre_Visit);     // Switch Value (could be shared)
2086         mstack.push(n->in(0), Pre_Visit);     // Visit Control input
2087         continue;                             // while (mstack.is_nonempty())
2088       case Op_StrComp:
2089       case Op_StrEquals:
2090       case Op_StrIndexOf:
2091       case Op_AryEq:
2092       case Op_EncodeISOArray:
2093         set_shared(n); // Force result into register (it will be anyways)
2094         break;
      case Op_ConP: {  // Convert pointers above the centerline to NULL
2096         TypeNode *tn = n->as_Type(); // Constants derive from type nodes
2097         const TypePtr* tp = tn->type()->is_ptr();
2098         if (tp->_ptr == TypePtr::AnyNull) {
2099           tn->set_type(TypePtr::NULL_PTR);
2100         }
2101         break;
2102       }
      case Op_ConN: {  // Convert narrow pointers above the centerline to NULL
2104         TypeNode *tn = n->as_Type(); // Constants derive from type nodes
2105         const TypePtr* tp = tn->type()->make_ptr();
2106         if (tp && tp->_ptr == TypePtr::AnyNull) {
2107           tn->set_type(TypeNarrowOop::NULL_PTR);
2108         }
2109         break;
2110       }
2111       case Op_Binary:         // These are introduced in the Post_Visit state.
2112         ShouldNotReachHere();
2113         break;
2114       case Op_ClearArray:
2115       case Op_SafePoint:
2116         mem_op = true;
2117         break;
2118       default:
2119         if( n->is_Store() ) {
2120           // Do match stores, despite no ideal reg
2121           mem_op = true;
2122           break;
2123         }
2124         if( n->is_Mem() ) { // Loads and LoadStores
2125           mem_op = true;
2126           // Loads must be root of match tree due to prior load conflict
2127           if( C->subsume_loads() == false )
2128             set_shared(n);
2129         }
2130         // Fall into default case
2131         if( !n->ideal_reg() )
2132           set_dontcare(n);  // Unmatchable Nodes
2133       } // end_switch
2134 
2135       for(int i = n->req() - 1; i >= 0; --i) { // For my children
2136         Node *m = n->in(i); // Get ith input
2137         if (m == NULL) continue;  // Ignore NULLs
2138         uint mop = m->Opcode();
2139 
2140         // Must clone all producers of flags, or we will not match correctly.
2141         // Suppose a compare setting int-flags is shared (e.g., a switch-tree)
2142         // then it will match into an ideal Op_RegFlags.  Alas, the fp-flags
2143         // are also there, so we may match a float-branch to int-flags and
2144         // expect the allocator to haul the flags from the int-side to the
2145         // fp-side.  No can do.
2146         if( _must_clone[mop] ) {
2147           mstack.push(m, Visit);
2148           continue; // for(int i = ...)
2149         }
2150 
2151         if( mop == Op_AddP && m->in(AddPNode::Base)->is_DecodeNarrowPtr()) {
          // Bases used in addresses must be shared, but since
          // they are shared through a DecodeN they may appear
          // to have a single use, so force sharing here.
2155           set_shared(m->in(AddPNode::Base)->in(1));
2156         }
2157 
2158         // if 'n' and 'm' are part of a graph for BMI instruction, clone this node.
2159 #ifdef X86
2160         if (UseBMI1Instructions && is_bmi_pattern(n, m)) {
2161           mstack.push(m, Visit);
2162           continue;
2163         }
2164 #endif
2165 
2166         // Clone addressing expressions as they are "free" in memory access instructions
2167         if( mem_op && i == MemNode::Address && mop == Op_AddP ) {
2168           // Some inputs for address expression are not put on stack
2169           // to avoid marking them as shared and forcing them into register
2170           // if they are used only in address expressions.
2171           // But they should be marked as shared if there are other uses
2172           // besides address expressions.
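          // For example, on x86 a subtree such as
          //   (AddP base (AddP base (LShiftX idx con)) offset)
          // folds into a single [base + idx<<con + offset] addressing
          // mode, so each piece is cloned per use rather than shared.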
2173 
2174           Node *off = m->in(AddPNode::Offset);
2175           if( off->is_Con() &&
2176               // When there are other uses besides address expressions
2177               // put it on stack and mark as shared.
2178               !is_visited(m) ) {
2179             address_visited.test_set(m->_idx); // Flag as address_visited
2180             Node *adr = m->in(AddPNode::Address);
2181 
2182             // Intel, ARM and friends can handle 2 adds in addressing mode
2183             if( clone_shift_expressions && adr->is_AddP() &&
2184                 // AtomicAdd is not an addressing expression.
2185                 // Cheap to find it by looking for screwy base.
2186                 !adr->in(AddPNode::Base)->is_top() &&
2187                 // Are there other uses besides address expressions?
2188                 !is_visited(adr) ) {
2189               address_visited.set(adr->_idx); // Flag as address_visited
2190               Node *shift = adr->in(AddPNode::Offset);
2191               // Check for shift by small constant as well
2192               if( shift->Opcode() == Op_LShiftX && shift->in(2)->is_Con() &&
2193                   shift->in(2)->get_int() <= 3 &&
2194                   // Are there other uses besides address expressions?
2195                   !is_visited(shift) ) {
2196                 address_visited.set(shift->_idx); // Flag as address_visited
2197                 mstack.push(shift->in(2), Visit);
2198                 Node *conv = shift->in(1);
2199 #ifdef _LP64
                // Allow the Matcher to match the rule which bypasses
                // the ConvI2L operation for an array index on LP64
                // if the index value is positive.
2203                 if( conv->Opcode() == Op_ConvI2L &&
2204                     conv->as_Type()->type()->is_long()->_lo >= 0 &&
2205                     // Are there other uses besides address expressions?
2206                     !is_visited(conv) ) {
2207                   address_visited.set(conv->_idx); // Flag as address_visited
2208                   mstack.push(conv->in(1), Pre_Visit);
2209                 } else
2210 #endif
2211                 mstack.push(conv, Pre_Visit);
2212               } else {
2213                 mstack.push(shift, Pre_Visit);
2214               }
2215               mstack.push(adr->in(AddPNode::Address), Pre_Visit);
2216               mstack.push(adr->in(AddPNode::Base), Pre_Visit);
2217             } else {  // Sparc, Alpha, PPC and friends
2218               mstack.push(adr, Pre_Visit);
2219             }
2220 
2221             // Clone X+offset as it also folds into most addressing expressions
2222             mstack.push(off, Visit);
2223             mstack.push(m->in(AddPNode::Base), Pre_Visit);
2224             continue; // for(int i = ...)
2225           } // if( off->is_Con() )
2226         }   // if( mem_op &&
2227         mstack.push(m, Pre_Visit);
2228       }     // for(int i = ...)
2229     }
2230     else if (nstate == Alt_Post_Visit) {
2231       mstack.pop(); // Remove node from stack
2232       // We cannot remove the Cmp input from the Bool here, as the Bool may be
2233       // shared and all users of the Bool need to move the Cmp in parallel.
2234       // This leaves both the Bool and the If pointing at the Cmp.  To
2235       // prevent the Matcher from trying to Match the Cmp along both paths
2236       // BoolNode::match_edge always returns a zero.
2237 
2238       // We reorder the Op_If in a pre-order manner, so we can visit without
2239       // accidentally sharing the Cmp (the Bool and the If make 2 users).
2240       n->add_req( n->in(1)->in(1) ); // Add the Cmp next to the Bool
2241     }
2242     else if (nstate == Post_Visit) {
2243       mstack.pop(); // Remove node from stack
2244 
2245       // Now hack a few special opcodes
2246       switch( n->Opcode() ) {       // Handle some opcodes special
2247       case Op_StorePConditional:
2248       case Op_StoreIConditional:
2249       case Op_StoreLConditional:
2250       case Op_CompareAndSwapI:
2251       case Op_CompareAndSwapL:
2252       case Op_CompareAndSwapP:
2253       case Op_CompareAndSwapN: {   // Convert trinary to binary-tree
2254         Node *newval = n->in(MemNode::ValueIn );
2255         Node *oldval  = n->in(LoadStoreConditionalNode::ExpectedIn);
2256         Node *pair = new BinaryNode( oldval, newval );
2257         n->set_req(MemNode::ValueIn,pair);
2258         n->del_req(LoadStoreConditionalNode::ExpectedIn);
2259         break;
2260       }
2261       case Op_CMoveD:              // Convert trinary to binary-tree
2262       case Op_CMoveF:
2263       case Op_CMoveI:
2264       case Op_CMoveL:
2265       case Op_CMoveN:
2266       case Op_CMoveP: {
2267         // Restructure into a binary tree for Matching.  It's possible that
2268         // we could move this code up next to the graph reshaping for IfNodes
2269         // or vice-versa, but I do not want to debug this for Ladybird.
2270         // 10/2/2000 CNC.
2271         Node *pair1 = new BinaryNode(n->in(1),n->in(1)->in(1));
2272         n->set_req(1,pair1);
2273         Node *pair2 = new BinaryNode(n->in(2),n->in(3));
2274         n->set_req(2,pair2);
2275         n->del_req(3);
2276         break;
2277       }
2278       case Op_LoopLimit: {
2279         Node *pair1 = new BinaryNode(n->in(1),n->in(2));
2280         n->set_req(1,pair1);
2281         n->set_req(2,n->in(3));
2282         n->del_req(3);
2283         break;
2284       }
2285       case Op_StrEquals: {
2286         Node *pair1 = new BinaryNode(n->in(2),n->in(3));
2287         n->set_req(2,pair1);
2288         n->set_req(3,n->in(4));
2289         n->del_req(4);
2290         break;
2291       }
2292       case Op_StrComp:
2293       case Op_StrIndexOf: {
2294         Node *pair1 = new BinaryNode(n->in(2),n->in(3));
2295         n->set_req(2,pair1);
2296         Node *pair2 = new BinaryNode(n->in(4),n->in(5));
2297         n->set_req(3,pair2);
2298         n->del_req(5);
2299         n->del_req(4);
2300         break;
2301       }
2302       case Op_EncodeISOArray: {
2303         // Restructure into a binary tree for Matching.
2304         Node* pair = new BinaryNode(n->in(3), n->in(4));
2305         n->set_req(3, pair);
2306         n->del_req(4);
2307         break;
2308       }
2309       default:
2310         break;
2311       }
2312     }
2313     else {
2314       ShouldNotReachHere();
2315     }
2316   } // end of while (mstack.is_nonempty())
2317 }
2318 
2319 #ifdef ASSERT
2320 // machine-independent root to machine-dependent root
2321 void Matcher::dump_old2new_map() {
2322   _old2new_map.dump();
2323 }
2324 #endif
2325 
2326 //---------------------------collect_null_checks-------------------------------
2327 // Find null checks in the ideal graph; write a machine-specific node for
2328 // it.  Used by later implicit-null-check handling.  Actually collects
2329 // either an IfTrue or IfFalse for the common NOT-null path, AND the ideal
2330 // value being tested.
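// The _null_check_tests list holds (proj, tested-value) pairs; see
// validate_null_checks(), which walks the list with a stride of 2.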
2331 void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) {
2332   Node *iff = proj->in(0);
2333   if( iff->Opcode() == Op_If ) {
2334     // During matching If's have Bool & Cmp side-by-side
2335     BoolNode *b = iff->in(1)->as_Bool();
2336     Node *cmp = iff->in(2);
2337     int opc = cmp->Opcode();
2338     if (opc != Op_CmpP && opc != Op_CmpN) return;
2339 
2340     const Type* ct = cmp->in(2)->bottom_type();
2341     if (ct == TypePtr::NULL_PTR ||
2342         (opc == Op_CmpN && ct == TypeNarrowOop::NULL_PTR)) {
2343 
2344       bool push_it = false;
2345       if( proj->Opcode() == Op_IfTrue ) {
2346         extern int all_null_checks_found;
2347         all_null_checks_found++;
2348         if( b->_test._test == BoolTest::ne ) {
2349           push_it = true;
2350         }
2351       } else {
2352         assert( proj->Opcode() == Op_IfFalse, "" );
2353         if( b->_test._test == BoolTest::eq ) {
2354           push_it = true;
2355         }
2356       }
2357       if( push_it ) {
2358         _null_check_tests.push(proj);
2359         Node* val = cmp->in(1);
2360 #ifdef _LP64
2361         if (val->bottom_type()->isa_narrowoop() &&
2362             !Matcher::narrow_oop_use_complex_address()) {
2363           //
2364           // Look for DecodeN node which should be pinned to orig_proj.
          // On platforms (e.g., Sparc) which cannot handle 2 adds
          // in an addressing mode, we have to keep the DecodeN node and
          // use it to do the implicit NULL check in the address.
2368           //
2369           // DecodeN node was pinned to non-null path (orig_proj) during
2370           // CastPP transformation in final_graph_reshaping_impl().
2371           //
2372           uint cnt = orig_proj->outcnt();
2373           for (uint i = 0; i < orig_proj->outcnt(); i++) {
2374             Node* d = orig_proj->raw_out(i);
2375             if (d->is_DecodeN() && d->in(1) == val) {
2376               val = d;
2377               val->set_req(0, NULL); // Unpin now.
2378               // Mark this as special case to distinguish from
2379               // a regular case: CmpP(DecodeN, NULL).
2380               val = (Node*)(((intptr_t)val) | 1);
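              // The low bit of the pointer serves as the flag;
              // validate_null_checks() strips it again with '& ~1'.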
2381               break;
2382             }
2383           }
2384         }
2385 #endif
2386         _null_check_tests.push(val);
2387       }
2388     }
2389   }
2390 }
2391 
2392 //---------------------------validate_null_checks------------------------------
// It's possible that the value being NULL checked is not the root of a match
2394 // tree.  If so, I cannot use the value in an implicit null check.
2395 void Matcher::validate_null_checks( ) {
2396   uint cnt = _null_check_tests.size();
2397   for( uint i=0; i < cnt; i+=2 ) {
2398     Node *test = _null_check_tests[i];
2399     Node *val = _null_check_tests[i+1];
2400     bool is_decoden = ((intptr_t)val) & 1;
2401     val = (Node*)(((intptr_t)val) & ~1);
2402     if (has_new_node(val)) {
2403       Node* new_val = new_node(val);
2404       if (is_decoden) {
2405         assert(val->is_DecodeNarrowPtr() && val->in(0) == NULL, "sanity");
2406         // Note: new_val may have a control edge if
2407         // the original ideal node DecodeN was matched before
2408         // it was unpinned in Matcher::collect_null_checks().
2409         // Unpin the mach node and mark it.
2410         new_val->set_req(0, NULL);
2411         new_val = (Node*)(((intptr_t)new_val) | 1);
2412       }
2413       // Is a match-tree root, so replace with the matched value
2414       _null_check_tests.map(i+1, new_val);
2415     } else {
2416       // Yank from candidate list
2417       _null_check_tests.map(i+1,_null_check_tests[--cnt]);
2418       _null_check_tests.map(i,_null_check_tests[--cnt]);
2419       _null_check_tests.pop();
2420       _null_check_tests.pop();
2421       i-=2;
2422     }
2423   }
2424 }
2425 
// Used by the DFA in dfa_xxx.cpp.  Check for a following barrier or
// atomic instruction that acts as a store-load barrier; if one follows
// with no intervening volatile load, we don't need a barrier here.
2429 // We retain the Node to act as a compiler ordering barrier.
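// For example, a MemBarVolatile that is followed on the same control
// path by a CompareAndSwap (with no volatile load in between) is
// redundant, since the CAS itself orders the preceding store.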
2430 bool Matcher::post_store_load_barrier(const Node* vmb) {
2431   Compile* C = Compile::current();
2432   assert(vmb->is_MemBar(), "");
2433   assert(vmb->Opcode() != Op_MemBarAcquire && vmb->Opcode() != Op_LoadFence, "");
2434   const MemBarNode* membar = vmb->as_MemBar();
2435 
2436   // Get the Ideal Proj node, ctrl, that can be used to iterate forward
2437   Node* ctrl = NULL;
2438   for (DUIterator_Fast imax, i = membar->fast_outs(imax); i < imax; i++) {
2439     Node* p = membar->fast_out(i);
2440     assert(p->is_Proj(), "only projections here");
2441     if ((p->as_Proj()->_con == TypeFunc::Control) &&
2442         !C->node_arena()->contains(p)) { // Unmatched old-space only
2443       ctrl = p;
2444       break;
2445     }
2446   }
2447   assert((ctrl != NULL), "missing control projection");
2448 
2449   for (DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++) {
2450     Node *x = ctrl->fast_out(j);
2451     int xop = x->Opcode();
2452 
2453     // We don't need current barrier if we see another or a lock
2454     // before seeing volatile load.
2455     //
2456     // Op_Fastunlock previously appeared in the Op_* list below.
2457     // With the advent of 1-0 lock operations we're no longer guaranteed
2458     // that a monitor exit operation contains a serializing instruction.
2459 
2460     if (xop == Op_MemBarVolatile ||
2461         xop == Op_CompareAndSwapL ||
2462         xop == Op_CompareAndSwapP ||
2463         xop == Op_CompareAndSwapN ||
2464         xop == Op_CompareAndSwapI) {
2465       return true;
2466     }
2467 
2468     // Op_FastLock previously appeared in the Op_* list above.
2469     // With biased locking we're no longer guaranteed that a monitor
2470     // enter operation contains a serializing instruction.
2471     if ((xop == Op_FastLock) && !UseBiasedLocking) {
2472       return true;
2473     }
2474 
2475     if (x->is_MemBar()) {
2476       // We must retain this membar if there is an upcoming volatile
2477       // load, which will be followed by acquire membar.
2478       if (xop == Op_MemBarAcquire || xop == Op_LoadFence) {
2479         return false;
2480       } else {
2481         // For other kinds of barriers, check by pretending we
2482         // are them, and seeing if we can be removed.
2483         return post_store_load_barrier(x->as_MemBar());
2484       }
2485     }
2486 
2487     // probably not necessary to check for these
2488     if (x->is_Call() || x->is_SafePoint() || x->is_block_proj()) {
2489       return false;
2490     }
2491   }
2492   return false;
2493 }
2494 
2495 // Check whether node n is a branch to an uncommon trap that we could
2496 // optimize as test with very high branch costs in case of going to
2497 // the uncommon trap. The code must be able to be recompiled to use
2498 // a cheaper test.
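// The expected shape is If -> IfFalse -> (Region ->)* Call whose entry
// point is the uncommon trap blob; the walk below follows at most 4
// Regions to bound the search against cycles.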
2499 bool Matcher::branches_to_uncommon_trap(const Node *n) {
2500   // Don't do it for natives, adapters, or runtime stubs
2501   Compile *C = Compile::current();
2502   if (!C->is_method_compilation()) return false;
2503 
2504   assert(n->is_If(), "You should only call this on if nodes.");
2505   IfNode *ifn = n->as_If();
2506 
2507   Node *ifFalse = NULL;
2508   for (DUIterator_Fast imax, i = ifn->fast_outs(imax); i < imax; i++) {
2509     if (ifn->fast_out(i)->is_IfFalse()) {
2510       ifFalse = ifn->fast_out(i);
2511       break;
2512     }
2513   }
2514   assert(ifFalse, "An If should have an ifFalse. Graph is broken.");
2515 
2516   Node *reg = ifFalse;
2517   int cnt = 4; // We must protect against cycles.  Limit to 4 iterations.
2518                // Alternatively use visited set?  Seems too expensive.
2519   while (reg != NULL && cnt > 0) {
2520     CallNode *call = NULL;
2521     RegionNode *nxt_reg = NULL;
2522     for (DUIterator_Fast imax, i = reg->fast_outs(imax); i < imax; i++) {
2523       Node *o = reg->fast_out(i);
2524       if (o->is_Call()) {
2525         call = o->as_Call();
2526       }
2527       if (o->is_Region()) {
2528         nxt_reg = o->as_Region();
2529       }
2530     }
2531 
2532     if (call &&
2533         call->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
2534       const Type* trtype = call->in(TypeFunc::Parms)->bottom_type();
2535       if (trtype->isa_int() && trtype->is_int()->is_con()) {
2536         jint tr_con = trtype->is_int()->get_con();
2537         Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(tr_con);
2538         Deoptimization::DeoptAction action = Deoptimization::trap_request_action(tr_con);
2539         assert((int)reason < (int)BitsPerInt, "recode bit map");
2540 
2541         if (is_set_nth_bit(C->allowed_deopt_reasons(), (int)reason)
2542             && action != Deoptimization::Action_none) {
2543           // This uncommon trap is sure to recompile, eventually.
2544           // When that happens, C->too_many_traps will prevent
2545           // this transformation from happening again.
2546           return true;
2547         }
2548       }
2549     }
2550 
2551     reg = nxt_reg;
2552     cnt--;
2553   }
2554 
2555   return false;
2556 }
2557 
2558 //=============================================================================
2559 //---------------------------State---------------------------------------------
2560 State::State(void) {
2561 #ifdef ASSERT
2562   _id = 0;
2563   _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
2564   _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
2565   //memset(_cost, -1, sizeof(_cost));
2566   //memset(_rule, -1, sizeof(_rule));
2567 #endif
2568   memset(_valid, 0, sizeof(_valid));
2569 }
2570 
2571 #ifdef ASSERT
2572 State::~State() {
2573   _id = 99;
2574   _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
2575   _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
2576   memset(_cost, -3, sizeof(_cost));
2577   memset(_rule, -3, sizeof(_rule));
2578 }
2579 #endif
2580 
2581 #ifndef PRODUCT
2582 //---------------------------dump----------------------------------------------
2583 void State::dump() {
2584   tty->print("\n");
2585   dump(0);
2586 }
2587 
2588 void State::dump(int depth) {
2589   for( int j = 0; j < depth; j++ )
2590     tty->print("   ");
2591   tty->print("--N: ");
2592   _leaf->dump();
2593   uint i;
2594   for( i = 0; i < _LAST_MACH_OPER; i++ )
2595     // Check for valid entry
    if( valid(i) ) {
      for( int j = 0; j < depth; j++ )
        tty->print("   ");
      assert(_cost[i] != max_juint, "cost must be a valid value");
      assert(_rule[i] < _last_Mach_Node, "rule[i] must be valid rule");
      tty->print_cr("%s  %d  %s",
                    ruleName[i], _cost[i], ruleName[_rule[i]] );
    }
2604   tty->cr();
2605 
2606   for( i=0; i<2; i++ )
2607     if( _kids[i] )
2608       _kids[i]->dump(depth+1);
2609 }
2610 #endif