/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "opto/ad.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/movenode.hpp"
#include "opto/opcodes.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/type.hpp"
#include "opto/vectornode.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"

OptoReg::Name OptoReg::c_frame_pointer;

const RegMask *Matcher::idealreg2regmask[static_cast<uint>(Opcodes::_last_machine_leaf)];
RegMask Matcher::mreg2regmask[_last_Mach_Reg];
RegMask Matcher::STACK_ONLY_mask;
RegMask Matcher::c_frame_ptr_mask;
const uint Matcher::_begin_rematerialize = _BEGIN_REMATERIALIZE;
const uint Matcher::_end_rematerialize   = _END_REMATERIALIZE;
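// Note (editor's gloss, not in the original): _BEGIN_REMATERIALIZE and
// _END_REMATERIALIZE are ADLC-generated bounds on the instruction rule
// numbers whose results are treated as rematerializable, i.e. cheap enough
// to recompute at a use instead of spilling to the stack.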

//---------------------------Matcher-------------------------------------------
Matcher::Matcher()
: PhaseTransform( Phase::Ins_Select ),
#ifdef ASSERT
  _old2new_map(C->comp_arena()),
  _new2old_map(C->comp_arena()),
#endif
  _shared_nodes(C->comp_arena()),
  _reduceOp(reduceOp), _leftOp(leftOp), _rightOp(rightOp),
  _swallowed(swallowed),
  _begin_inst_chain_rule(_BEGIN_INST_CHAIN_RULE),
  _end_inst_chain_rule(_END_INST_CHAIN_RULE),
  _must_clone(must_clone),
  _register_save_policy(register_save_policy),
  _c_reg_save_policy(c_reg_save_policy),
  _register_save_type(register_save_type),
  _ruleName(ruleName),
  _allocation_started(false),
  _states_arena(Chunk::medium_size),
  _visited(&_states_arena),
  _shared(&_states_arena),
  _dontcare(&_states_arena) {
  C->set_matcher(this);

  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_RegI)] = NULL;
  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_RegN)] = NULL;
  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_RegL)] = NULL;
  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_RegF)] = NULL;
  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_RegD)] = NULL;
  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_RegP)] = NULL;
  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_VecS)] = NULL;
  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_VecD)] = NULL;
  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_VecX)] = NULL;
  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_VecY)] = NULL;
  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_VecZ)] = NULL;

  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegI)] = NULL;
  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegN)] = NULL;
  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegL)] = NULL;
  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegF)] = NULL;
  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegD)] = NULL;
  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegP)] = NULL;
  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_VecS)] = NULL;
  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_VecD)] = NULL;
  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_VecX)] = NULL;
  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_VecY)] = NULL;
  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_VecZ)] = NULL;

  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegI)] = NULL;
  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegN)] = NULL;
  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegL)] = NULL;
  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegF)] = NULL;
  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegD)] = NULL;
  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegP)] = NULL;
  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_VecS)] = NULL;
  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_VecD)] = NULL;
  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_VecX)] = NULL;
  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_VecY)] = NULL;
  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_VecZ)] = NULL;

  debug_only(_mem_node = NULL;)   // Ideal memory node consumed by mach node
}

//------------------------------warp_incoming_stk_arg------------------------
// This warps a VMReg into an OptoReg::Name
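// For example (illustrative numbers only): with _old_SP at stack slot 8 and
// two out-preserve slots, an incoming stack argument in VMReg slot 3 warps
// to OptoReg::Name 8 + 3 + 2 = 13.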
OptoReg::Name Matcher::warp_incoming_stk_arg( VMReg reg ) {
  OptoReg::Name warped;
  if( reg->is_stack() ) {  // Stack slot argument?
    warped = OptoReg::add(_old_SP, reg->reg2stack() );
    warped = OptoReg::add(warped, C->out_preserve_stack_slots());
    if( warped >= _in_arg_limit )
      _in_arg_limit = OptoReg::add(warped, 1); // Bump max stack slot seen
    if (!RegMask::can_represent_arg(warped)) {
      // the compiler cannot represent this method's calling sequence
      C->record_method_not_compilable("unsupported incoming calling sequence");
      return OptoReg::Bad;
    }
    return warped;
  }
  return OptoReg::as_OptoReg(reg);
}

//---------------------------compute_old_SP------------------------------------
OptoReg::Name Compile::compute_old_SP() {
  int fixed    = fixed_slots();
  int preserve = in_preserve_stack_slots();
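  // Round up so the old SP lands on a stack-alignment boundary; e.g.
  // (illustrative) fixed == 3 and preserve == 4 round 7 up to 8 when the
  // alignment is 2 slots.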
  return OptoReg::stack2reg(round_to(fixed + preserve, Matcher::stack_alignment_in_slots()));
}



#ifdef ASSERT
void Matcher::verify_new_nodes_only(Node* xroot) {
  // Make sure that the new graph only references new nodes
  ResourceMark rm;
  Unique_Node_List worklist;
  VectorSet visited(Thread::current()->resource_area());
  worklist.push(xroot);
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    visited <<= n->_idx;
    assert(C->node_arena()->contains(n), "dead node");
    for (uint j = 0; j < n->req(); j++) {
      Node* in = n->in(j);
      if (in != NULL) {
        assert(C->node_arena()->contains(in), "dead node");
        if (!visited.test(in->_idx)) {
          worklist.push(in);
        }
      }
    }
  }
}
#endif


//---------------------------match---------------------------------------------
void Matcher::match( ) {
  if( MaxLabelRootDepth < 100 ) { // Too small?
    assert(false, "invalid MaxLabelRootDepth, increase it to 100 minimum");
    MaxLabelRootDepth = 100;
  }
  // One-time initialization of some register masks.
  init_spill_mask( C->root()->in(1) );
  _return_addr_mask = return_addr();
#ifdef _LP64
  // Pointers take 2 slots in 64-bit land
  _return_addr_mask.Insert(OptoReg::add(return_addr(),1));
#endif

  // Map a Java-signature return type into return register-value
  // machine registers for 0, 1 and 2 returned values.
  const TypeTuple *range = C->tf()->range();
  if( range->cnt() > TypeFunc::Parms ) { // If not a void function
    // Get ideal-register return type
    Opcodes ireg = range->field_at(TypeFunc::Parms)->ideal_reg();
    // Get machine return register
    Opcodes sop = C->start()->Opcode();
    OptoRegPair regs = return_value(ireg, false);

    // And mask for same
    _return_value_mask = RegMask(regs.first());
    if( OptoReg::is_valid(regs.second()) )
      _return_value_mask.Insert(regs.second());
  }

  // ---------------
  // Frame Layout

  // Need the method signature to determine the incoming argument types,
  // because the types determine which registers the incoming arguments are
  // in, and this affects the matched code.
  const TypeTuple *domain = C->tf()->domain();
  uint             argcnt = domain->cnt() - TypeFunc::Parms;
  BasicType *sig_bt        = NEW_RESOURCE_ARRAY( BasicType, argcnt );
  VMRegPair *vm_parm_regs  = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
  _parm_regs               = NEW_RESOURCE_ARRAY( OptoRegPair, argcnt );
  _calling_convention_mask = NEW_RESOURCE_ARRAY( RegMask, argcnt );
  uint i;
  for( i = 0; i<argcnt; i++ ) {
    sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
  }

  // Pass array of ideal registers and length to USER code (from the AD file)
  // that will convert this to an array of register numbers.
  const StartNode *start = C->start();
  start->calling_convention( sig_bt, vm_parm_regs, argcnt );
#ifdef ASSERT
  // Sanity check users' calling convention.  Real handy while trying to
  // get the initial port correct.
  { for (uint i = 0; i<argcnt; i++) {
      if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
        assert(domain->field_at(i+TypeFunc::Parms)==Type::HALF, "only allowed on halves" );
        _parm_regs[i].set_bad();
        continue;
      }
      VMReg parm_reg = vm_parm_regs[i].first();
      assert(parm_reg->is_valid(), "invalid arg?");
      if (parm_reg->is_reg()) {
        OptoReg::Name opto_parm_reg = OptoReg::as_OptoReg(parm_reg);
        assert(can_be_java_arg(opto_parm_reg) ||
               C->stub_function() == CAST_FROM_FN_PTR(address, OptoRuntime::rethrow_C) ||
               opto_parm_reg == inline_cache_reg(),
               "parameters in register must be preserved by runtime stubs");
      }
      for (uint j = 0; j < i; j++) {
        assert(parm_reg != vm_parm_regs[j].first(),
               "calling conv. must produce distinct regs");
      }
    }
  }
#endif

  // Do some initial frame layout.

  // Compute the old incoming SP (may be called FP) as
  //   OptoReg::stack0() + locks + in_preserve_stack_slots + pad2.
  _old_SP = C->compute_old_SP();
  assert( is_even(_old_SP), "must be even" );

  // Compute highest incoming stack argument as
  //   _old_SP + out_preserve_stack_slots + incoming argument size.
  _in_arg_limit = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  assert( is_even(_in_arg_limit), "out_preserve must be even" );
  for( i = 0; i < argcnt; i++ ) {
    // Permit args to have no register
    _calling_convention_mask[i].Clear();
    if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
      continue;
    }
    // calling_convention returns stack arguments as a count of
    // slots beyond OptoReg::stack0()/VMRegImpl::stack0.  We need to convert this to
    // the allocator's point of view, taking into account all the
    // preserve area, locks & pad2.

    OptoReg::Name reg1 = warp_incoming_stk_arg(vm_parm_regs[i].first());
    if( OptoReg::is_valid(reg1))
      _calling_convention_mask[i].Insert(reg1);

    OptoReg::Name reg2 = warp_incoming_stk_arg(vm_parm_regs[i].second());
    if( OptoReg::is_valid(reg2))
      _calling_convention_mask[i].Insert(reg2);

    // Save the biased stack-slot register numbers
    _parm_regs[i].set_pair(reg2, reg1);
  }

  // Finally, make sure the incoming arguments take up an even number of
  // words, in case the arguments or locals need to contain doubleword stack
  // slots.  The rest of the system assumes that stack slot pairs (in
  // particular, in the spill area) which look aligned will in fact be
  // aligned relative to the stack pointer in the target machine.  Double
  // stack slots will always be allocated aligned.
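  // E.g. (illustrative) _in_arg_limit == 13 rounds up to _new_SP == 14 when
  // RegMask::SlotsPerLong == 2.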
  _new_SP = OptoReg::Name(round_to(_in_arg_limit, RegMask::SlotsPerLong));

  // Compute highest outgoing stack argument as
  //   _new_SP + out_preserve_stack_slots + max(outgoing argument size).
  _out_arg_limit = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
  assert( is_even(_out_arg_limit), "out_preserve must be even" );

  if (!RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1))) {
    // the compiler cannot represent this method's calling sequence
    C->record_method_not_compilable("must be able to represent all call arguments in reg mask");
  }

  if (C->failing())  return;  // bailed out on incoming arg failure

  // ---------------
  // Collect roots of matcher trees.  Every node for which
  // _shared[_idx] is cleared is guaranteed to not be shared, and thus
  // can be a valid interior of some tree.
  find_shared( C->root() );
  find_shared( C->top() );

  C->print_method(PHASE_BEFORE_MATCHING);

  // Create new ideal node ConP #NULL even if it already exists in old space
  // to avoid false sharing if the corresponding mach node is not used.
  // The corresponding mach node is only used in rare cases for derived
  // pointers.
  Node* new_ideal_null = ConNode::make(TypePtr::NULL_PTR);

  // Swap out to old-space; emptying new-space
  Arena *old = C->node_arena()->move_contents(C->old_arena());

  // Save debug and profile information for nodes in old space:
  _old_node_note_array = C->node_note_array();
  if (_old_node_note_array != NULL) {
    C->set_node_note_array(new(C->comp_arena()) GrowableArray<Node_Notes*>
                           (C->comp_arena(), _old_node_note_array->length(),
                            0, NULL));
  }

  // Pre-size the new_node table to avoid the need for range checks.
  grow_new_node_array(C->unique());

  // Reset node counter so MachNodes start with _idx at 0
  int live_nodes = C->live_nodes();
  C->set_unique(0);
  C->reset_dead_node_list();

  // Recursively match trees from old space into new space.
  // Correct leaves of new-space Nodes; they point to old-space.
  _visited.Clear();             // Clear visit bits for xform call
  C->set_cached_top_node(xform( C->top(), live_nodes ));
  if (!C->failing()) {
    Node* xroot =        xform( C->root(), 1 );
    if (xroot == NULL) {
      Matcher::soft_match_failure();  // recursive matching process failed
      C->record_method_not_compilable("instruction match failed");
    } else {
      // During matching shared constants were attached to C->root()
      // because xroot wasn't available yet, so transfer the uses to
      // the xroot.
      for( DUIterator_Fast jmax, j = C->root()->fast_outs(jmax); j < jmax; j++ ) {
        Node* n = C->root()->fast_out(j);
        if (C->node_arena()->contains(n)) {
          assert(n->in(0) == C->root(), "should be control user");
          n->set_req(0, xroot);
          --j;
          --jmax;
        }
      }

      // Generate new mach node for ConP #NULL
      assert(new_ideal_null != NULL, "sanity");
      _mach_null = match_tree(new_ideal_null);
      // Don't set control, it will confuse GCM since there are no uses.
      // The control will be set when this node is used first time
      // in find_base_for_derived().
      assert(_mach_null != NULL, "");

      C->set_root(xroot->is_Root() ? xroot->as_Root() : NULL);

#ifdef ASSERT
      verify_new_nodes_only(xroot);
#endif
    }
  }
  if (C->top() == NULL || C->root() == NULL) {
    C->record_method_not_compilable("graph lost"); // %%% cannot happen?
  }
  if (C->failing()) {
    // delete old;
    old->destruct_contents();
    return;
  }
  assert( C->top(), "" );
  assert( C->root(), "" );
  validate_null_checks();

  // Now smoke old-space
  NOT_DEBUG( old->destruct_contents() );

  // ------------------------
  // Set up save-on-entry registers
  Fixup_Save_On_Entry( );
}


//------------------------------Fixup_Save_On_Entry----------------------------
// The stated purpose of this routine is to take care of save-on-entry
// registers.  However, the overall goal of the Match phase is to convert into
// machine-specific instructions which have RegMasks to guide allocation.
// So what this procedure really does is put a valid RegMask on each input
// to the machine-specific variations of all Return, TailCall and Halt
// instructions.  It also adds edges to define the save-on-entry values (and of
// course gives them a mask).

static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
  RegMask *rms = NEW_RESOURCE_ARRAY( RegMask, size );
  // Do all the pre-defined register masks
  rms[TypeFunc::Control  ] = RegMask::Empty;
  rms[TypeFunc::I_O      ] = RegMask::Empty;
  rms[TypeFunc::Memory   ] = RegMask::Empty;
  rms[TypeFunc::ReturnAdr] = ret_adr;
  rms[TypeFunc::FramePtr ] = fp;
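  // Entries from TypeFunc::Parms upward are left for the caller:
  // Fixup_Save_On_Entry fills in the return-value and save-on-entry masks.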
  return rms;
}

//---------------------------init_first_stack_mask-----------------------------
// Create the initial stack mask used by values spilling to the stack.
// Disallow any debug info in outgoing argument areas by setting the
// initial mask accordingly.
void Matcher::init_first_stack_mask() {

  // Allocate storage for spill masks as masks for the appropriate load type.
  RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask) * (3*6+5));
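  // 3*6+5 == 23 masks: three tables (spill, debug, mhdebug) of six scalar
  // register classes each, plus five vector spill masks (VecS..VecZ), as
  // laid out by the index assignments below.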

  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_RegN)] = &rms[0];
  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_RegI)] = &rms[1];
  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_RegL)] = &rms[2];
  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_RegF)] = &rms[3];
  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_RegD)] = &rms[4];
  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_RegP)] = &rms[5];

  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegN)] = &rms[6];
  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegI)] = &rms[7];
  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegL)] = &rms[8];
  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegF)] = &rms[9];
  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegD)] = &rms[10];
  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegP)] = &rms[11];

  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegN)] = &rms[12];
  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegI)] = &rms[13];
  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegL)] = &rms[14];
  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegF)] = &rms[15];
  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegD)] = &rms[16];
  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegP)] = &rms[17];

  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_VecS)] = &rms[18];
  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_VecD)] = &rms[19];
  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_VecX)] = &rms[20];
  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_VecY)] = &rms[21];
  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_VecZ)] = &rms[22];

  OptoReg::Name i;

  // At first, start with the empty mask
  C->FIRST_STACK_mask().Clear();

  // Add in the incoming argument area
  OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  for (i = init_in; i < _in_arg_limit; i = OptoReg::add(i,1)) {
    C->FIRST_STACK_mask().Insert(i);
  }
  // Add in all bits past the outgoing argument area
  guarantee(RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1)),
            "must be able to represent all call arguments in reg mask");
  OptoReg::Name init = _out_arg_limit;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1)) {
    C->FIRST_STACK_mask().Insert(i);
  }
  // Finally, set the "infinite stack" bit.
  C->FIRST_STACK_mask().set_AllStack();
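  // FIRST_STACK_mask now covers the incoming-argument area, every slot above
  // the outgoing-argument area, and the AllStack bit; the outgoing-argument
  // area itself is deliberately excluded so spills and debug info never land
  // where call arguments will be stored.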

  // Make spill masks.  Registers for their class, plus FIRST_STACK_mask.
  RegMask aligned_stack_mask = C->FIRST_STACK_mask();
  // Keep spill masks aligned.
  aligned_stack_mask.clear_to_pairs();
  assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");

  *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegP)] = *idealreg2regmask[static_cast<uint>(Opcodes::Op_RegP)];
#ifdef _LP64
  *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegN)] = *idealreg2regmask[static_cast<uint>(Opcodes::Op_RegN)];
   idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegN)]->OR(C->FIRST_STACK_mask());
   idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegP)]->OR(aligned_stack_mask);
#else
   idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegP)]->OR(C->FIRST_STACK_mask());
#endif
  *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegI)] = *idealreg2regmask[static_cast<uint>(Opcodes::Op_RegI)];
   idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegI)]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegL)] = *idealreg2regmask[static_cast<uint>(Opcodes::Op_RegL)];
   idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegL)]->OR(aligned_stack_mask);
  *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegF)] = *idealreg2regmask[static_cast<uint>(Opcodes::Op_RegF)];
   idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegF)]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegD)] = *idealreg2regmask[static_cast<uint>(Opcodes::Op_RegD)];
   idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegD)]->OR(aligned_stack_mask);

  if (Matcher::vector_size_supported(T_BYTE,4)) {
    *idealreg2spillmask[static_cast<uint>(Opcodes::Op_VecS)] = *idealreg2regmask[static_cast<uint>(Opcodes::Op_VecS)];
     idealreg2spillmask[static_cast<uint>(Opcodes::Op_VecS)]->OR(C->FIRST_STACK_mask());
  }
  if (Matcher::vector_size_supported(T_FLOAT,2)) {
    // For VecD we need dual alignment and 8 bytes (2 slots) for spills.
    // RA guarantees such alignment since it is needed for Double and Long values.
    *idealreg2spillmask[static_cast<uint>(Opcodes::Op_VecD)] = *idealreg2regmask[static_cast<uint>(Opcodes::Op_VecD)];
     idealreg2spillmask[static_cast<uint>(Opcodes::Op_VecD)]->OR(aligned_stack_mask);
  }
  if (Matcher::vector_size_supported(T_FLOAT,4)) {
    // For VecX we need quad alignment and 16 bytes (4 slots) for spills.
    //
    // RA can use input arguments stack slots for spills but until RA
    // we don't know frame size and offset of input arg stack slots.
    //
    // Exclude last input arg stack slots to avoid spilling vectors there
    // otherwise vector spills could stomp over stack slots in caller frame.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecX); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
    aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX);
    assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[static_cast<uint>(Opcodes::Op_VecX)] = *idealreg2regmask[static_cast<uint>(Opcodes::Op_VecX)];
     idealreg2spillmask[static_cast<uint>(Opcodes::Op_VecX)]->OR(aligned_stack_mask);
  }
  if (Matcher::vector_size_supported(T_FLOAT,8)) {
    // For VecY we need octo alignment and 32 bytes (8 slots) for spills.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecY); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
    aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
    assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[static_cast<uint>(Opcodes::Op_VecY)] = *idealreg2regmask[static_cast<uint>(Opcodes::Op_VecY)];
     idealreg2spillmask[static_cast<uint>(Opcodes::Op_VecY)]->OR(aligned_stack_mask);
  }
  if (Matcher::vector_size_supported(T_FLOAT,16)) {
    // For VecZ we need enough alignment and 64 bytes (16 slots) for spills.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecZ); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
    aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecZ);
    assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[static_cast<uint>(Opcodes::Op_VecZ)] = *idealreg2regmask[static_cast<uint>(Opcodes::Op_VecZ)];
     idealreg2spillmask[static_cast<uint>(Opcodes::Op_VecZ)]->OR(aligned_stack_mask);
  }
  if (UseFPUForSpilling) {
    // This mask logic assumes that the spill operations are
    // symmetric and that the registers involved are the same size.
    // On SPARC, for instance, we may have to use 64-bit moves that
    // kill 2 registers when used with F0-F31.
    idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegI)]->OR(*idealreg2regmask[static_cast<uint>(Opcodes::Op_RegF)]);
    idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegF)]->OR(*idealreg2regmask[static_cast<uint>(Opcodes::Op_RegI)]);
#ifdef _LP64
    idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegN)]->OR(*idealreg2regmask[static_cast<uint>(Opcodes::Op_RegF)]);
    idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegL)]->OR(*idealreg2regmask[static_cast<uint>(Opcodes::Op_RegD)]);
    idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegD)]->OR(*idealreg2regmask[static_cast<uint>(Opcodes::Op_RegL)]);
    idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegP)]->OR(*idealreg2regmask[static_cast<uint>(Opcodes::Op_RegD)]);
#else
    idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegP)]->OR(*idealreg2regmask[static_cast<uint>(Opcodes::Op_RegF)]);
#ifdef ARM
    // ARM has support for moving 64bit values between a pair of
    // integer registers and a double register
    idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegL)]->OR(*idealreg2regmask[static_cast<uint>(Opcodes::Op_RegD)]);
    idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegD)]->OR(*idealreg2regmask[static_cast<uint>(Opcodes::Op_RegL)]);
#endif
#endif
  }

  // Make up debug masks.  Any spill slot plus callee-save registers.
  // Caller-save registers are assumed to be trashable by the various
  // inline-cache fixup routines.
  *idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegN)] = *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegN)];
  *idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegI)] = *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegI)];
  *idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegL)] = *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegL)];
  *idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegF)] = *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegF)];
  *idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegD)] = *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegD)];
  *idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegP)] = *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegP)];

  *idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegN)] = *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegN)];
  *idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegI)] = *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegI)];
  *idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegL)] = *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegL)];
  *idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegF)] = *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegF)];
  *idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegD)] = *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegD)];
  *idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegP)] = *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegP)];

  // Prevent stub compilations from attempting to reference
  // callee-saved registers from debug info
  bool exclude_soe = !Compile::current()->is_method_compilation();

  for( i=OptoReg::Name(0); i<OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i,1) ) {
    // registers the caller has to save do not work
    if( _register_save_policy[i] == 'C' ||
        _register_save_policy[i] == 'A' ||
        (_register_save_policy[i] == 'E' && exclude_soe) ) {
      idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegN)]->Remove(i);
      idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegI)]->Remove(i); // Exclude save-on-call
      idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegL)]->Remove(i); // registers from debug
      idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegF)]->Remove(i); // masks
      idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegD)]->Remove(i);
      idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegP)]->Remove(i);

      idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegN)]->Remove(i);
      idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegI)]->Remove(i);
      idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegL)]->Remove(i);
      idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegF)]->Remove(i);
      idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegD)]->Remove(i);
      idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegP)]->Remove(i);
    }
  }

  // Subtract the register we use to save the SP for MethodHandle
  // invokes from the debug mask.
  const RegMask save_mask = method_handle_invoke_SP_save_mask();
  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegN)]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegI)]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegL)]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegF)]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegD)]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegP)]->SUBTRACT(save_mask);
}

//---------------------------is_save_on_entry----------------------------------
bool Matcher::is_save_on_entry( int reg ) {
  return
    _register_save_policy[reg] == 'E' ||
    _register_save_policy[reg] == 'A' || // Save-on-entry register?
    // Also save argument registers in the trampolining stubs
    (C->save_argument_registers() && is_spillable_arg(reg));
}

//---------------------------Fixup_Save_On_Entry-------------------------------
void Matcher::Fixup_Save_On_Entry( ) {
  init_first_stack_mask();

  Node *root = C->root();       // Short name for root
  // Count number of save-on-entry registers.
  uint soe_cnt = number_of_saved_registers();
  uint i;

  // Find the procedure Start Node
  StartNode *start = C->start();
  assert( start, "Expect a start node" );

  // Save argument registers in the trampolining stubs
  if( C->save_argument_registers() )
    for( i = 0; i < _last_Mach_Reg; i++ )
      if( is_spillable_arg(i) )
        soe_cnt++;

  // Input RegMask array shared by all Returns.
  // The type for doubles and longs has a count of 2, but
  // there is only 1 returned value
  uint ret_edge_cnt = TypeFunc::Parms + ((C->tf()->range()->cnt() == TypeFunc::Parms) ? 0 : 1);
  RegMask *ret_rms  = init_input_masks( ret_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  // Returns have 0 or 1 returned values depending on call signature.
  // Return register is specified by return_value in the AD file.
  if (ret_edge_cnt > TypeFunc::Parms)
    ret_rms[TypeFunc::Parms+0] = _return_value_mask;

  // Input RegMask array shared by all Rethrows.
  uint reth_edge_cnt = TypeFunc::Parms+1;
  RegMask *reth_rms  = init_input_masks( reth_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  // Rethrow takes exception oop only, but in the argument 0 slot.
  OptoReg::Name reg = find_receiver(false);
  if (reg >= 0) {
    reth_rms[TypeFunc::Parms] = mreg2regmask[reg];
#ifdef _LP64
    // Need two slots for ptrs in 64-bit land
    reth_rms[TypeFunc::Parms].Insert(OptoReg::add(OptoReg::Name(reg), 1));
#endif
  }

  // Input RegMask array shared by all TailCalls
  uint tail_call_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_call_rms = init_input_masks( tail_call_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Input RegMask array shared by all TailJumps
  uint tail_jump_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_jump_rms = init_input_masks( tail_jump_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // TailCalls have 2 returned values (target & moop), whose masks come
  // from the usual MachNode/MachOper mechanism.  Find a sample
  // TailCall to extract these masks and put the correct masks into
  // the tail_call_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Opcodes::Op_TailCall ) {
      tail_call_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_call_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }

  // TailJumps have 2 returned values (target & ex_oop), whose masks come
  // from the usual MachNode/MachOper mechanism.  Find a sample
  // TailJump to extract these masks and put the correct masks into
  // the tail_jump_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Opcodes::Op_TailJump ) {
      tail_jump_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_jump_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }

  // Input RegMask array shared by all Halts
  uint halt_edge_cnt = TypeFunc::Parms;
  RegMask *halt_rms = init_input_masks( halt_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Capture the return input masks into each exit flavor
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *exit = root->in(i)->as_MachReturn();
    switch( exit->ideal_Opcode() ) {
      case Opcodes::Op_Return   : exit->_in_rms = ret_rms;  break;
      case Opcodes::Op_Rethrow  : exit->_in_rms = reth_rms; break;
      case Opcodes::Op_TailCall : exit->_in_rms = tail_call_rms; break;
      case Opcodes::Op_TailJump : exit->_in_rms = tail_jump_rms; break;
      case Opcodes::Op_Halt     : exit->_in_rms = halt_rms; break;
      default          : ShouldNotReachHere();
    }
  }

  // Next unused projection number from Start.
  int proj_cnt = C->tf()->domain()->cnt();

  // Do all the save-on-entry registers.  Make projections from Start for
  // them, and give them a use at the exit points.  To the allocator, they
  // look like incoming register arguments.
  for( i = 0; i < _last_Mach_Reg; i++ ) {
    if( is_save_on_entry(i) ) {

      // Add the save-on-entry to the mask array
      ret_rms      [      ret_edge_cnt] = mreg2regmask[i];
      reth_rms     [     reth_edge_cnt] = mreg2regmask[i];
      tail_call_rms[tail_call_edge_cnt] = mreg2regmask[i];
      tail_jump_rms[tail_jump_edge_cnt] = mreg2regmask[i];
      // Halts need the SOE registers, but only in the stack as debug info.
      // A just-prior uncommon-trap or deoptimization will use the SOE regs.
      halt_rms     [     halt_edge_cnt] = *idealreg2spillmask[static_cast<uint>(_register_save_type[i])];

      Node *mproj;

      // Is this a RegF low half of a RegD?  Double up 2 adjacent RegF's
      // into a single RegD.
      if( (i&1) == 0 &&
          _register_save_type[i  ] == Opcodes::Op_RegF &&
          _register_save_type[i+1] == Opcodes::Op_RegF &&
          is_save_on_entry(i+1) ) {
        // Add other bit for double
        ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
        reth_rms     [     reth_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
        halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
        mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Opcodes::Op_RegD );
        proj_cnt += 2;          // Skip 2 for doubles
      }
      else if( (i&1) == 1 &&    // Else check for high half of double
               _register_save_type[i-1] == Opcodes::Op_RegF &&
               _register_save_type[i  ] == Opcodes::Op_RegF &&
               is_save_on_entry(i-1) ) {
        ret_rms      [      ret_edge_cnt] = RegMask::Empty;
        reth_rms     [     reth_edge_cnt] = RegMask::Empty;
        tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
        tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
        halt_rms     [     halt_edge_cnt] = RegMask::Empty;
        mproj = C->top();
      }
      // Is this a RegI low half of a RegL?  Double up 2 adjacent RegI's
      // into a single RegL.
      else if( (i&1) == 0 &&
               _register_save_type[i  ] == Opcodes::Op_RegI &&
               _register_save_type[i+1] == Opcodes::Op_RegI &&
               is_save_on_entry(i+1) ) {
        // Add other bit for long
        ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
        reth_rms     [     reth_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
        halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
        mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Opcodes::Op_RegL );
        proj_cnt += 2;          // Skip 2 for longs
      }
      else if( (i&1) == 1 &&    // Else check for high half of long
               _register_save_type[i-1] == Opcodes::Op_RegI &&
               _register_save_type[i  ] == Opcodes::Op_RegI &&
               is_save_on_entry(i-1) ) {
        ret_rms      [      ret_edge_cnt] = RegMask::Empty;
        reth_rms     [     reth_edge_cnt] = RegMask::Empty;
        tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
        tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
        halt_rms     [     halt_edge_cnt] = RegMask::Empty;
        mproj = C->top();
      } else {
        // Make a projection for it off the Start
        mproj = new MachProjNode( start, proj_cnt++, ret_rms[ret_edge_cnt], _register_save_type[i] );
      }

      ret_edge_cnt ++;
      reth_edge_cnt ++;
      tail_call_edge_cnt ++;
      tail_jump_edge_cnt ++;
      halt_edge_cnt ++;

      // Add a use of the SOE register to all exit paths
      for( uint j=1; j < root->req(); j++ )
        root->in(j)->add_req(mproj);
    } // End of if a save-on-entry register
  } // End of for all machine registers
}

//------------------------------init_spill_mask--------------------------------
void Matcher::init_spill_mask( Node *ret ) {
  if( idealreg2regmask[static_cast<uint>(Opcodes::Op_RegI)] ) return; // One time only init

  OptoReg::c_frame_pointer = c_frame_pointer();
  c_frame_ptr_mask = c_frame_pointer();
#ifdef _LP64
  // pointers are twice as big
  c_frame_ptr_mask.Insert(OptoReg::add(c_frame_pointer(),1));
#endif

  // Start at OptoReg::stack0()
  STACK_ONLY_mask.Clear();
  OptoReg::Name init = OptoReg::stack2reg(0);
  // STACK_ONLY_mask is all stack bits
  OptoReg::Name i;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1))
    STACK_ONLY_mask.Insert(i);
  // Also set the "infinite stack" bit.
  STACK_ONLY_mask.set_AllStack();

  // Copy the register names over into the shared world
  for( i=OptoReg::Name(0); i<OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i,1) ) {
    // SharedInfo::regName[i] = regName[i];
    // Handy RegMasks per machine register
    mreg2regmask[i].Insert(i);
  }

  // Grab the Frame Pointer
  Node *fp  = ret->in(TypeFunc::FramePtr);
  Node *mem = ret->in(TypeFunc::Memory);
  const TypePtr* atp = TypePtr::BOTTOM;
  // Share frame pointer while making spill ops
  set_shared(fp);

  // Compute generic short-offset Loads
#ifdef _LP64
  MachNode *spillCP = match_tree(new LoadNNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM,MemNode::unordered));
#endif
  MachNode *spillI  = match_tree(new LoadINode(NULL,mem,fp,atp,TypeInt::INT,MemNode::unordered));
  MachNode *spillL  = match_tree(new LoadLNode(NULL,mem,fp,atp,TypeLong::LONG,MemNode::unordered, LoadNode::DependsOnlyOnTest, false));
  MachNode *spillF  = match_tree(new LoadFNode(NULL,mem,fp,atp,Type::FLOAT,MemNode::unordered));
  MachNode *spillD  = match_tree(new LoadDNode(NULL,mem,fp,atp,Type::DOUBLE,MemNode::unordered));
  MachNode *spillP  = match_tree(new LoadPNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM,MemNode::unordered));
  assert(spillI != NULL && spillL != NULL && spillF != NULL &&
         spillD != NULL && spillP != NULL, "");
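  // These dummy loads are matched only to interrogate the AD file; they are
  // never wired into the graph.  The out_RegMask() of each matched load is
  // the register class the ADLC chose for that ideal register type.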
  // Get the ADLC notion of the right regmask, for each basic type.
#ifdef _LP64
  idealreg2regmask[static_cast<uint>(Opcodes::Op_RegN)] = &spillCP->out_RegMask();
#endif
  idealreg2regmask[static_cast<uint>(Opcodes::Op_RegI)] = &spillI->out_RegMask();
  idealreg2regmask[static_cast<uint>(Opcodes::Op_RegL)] = &spillL->out_RegMask();
  idealreg2regmask[static_cast<uint>(Opcodes::Op_RegF)] = &spillF->out_RegMask();
  idealreg2regmask[static_cast<uint>(Opcodes::Op_RegD)] = &spillD->out_RegMask();
  idealreg2regmask[static_cast<uint>(Opcodes::Op_RegP)] = &spillP->out_RegMask();

  // Vector regmasks.
  if (Matcher::vector_size_supported(T_BYTE,4)) {
    TypeVect::VECTS = TypeVect::make(T_BYTE, 4);
    MachNode *spillVectS = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTS));
    idealreg2regmask[static_cast<uint>(Opcodes::Op_VecS)] = &spillVectS->out_RegMask();
  }
  if (Matcher::vector_size_supported(T_FLOAT,2)) {
    MachNode *spillVectD = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTD));
    idealreg2regmask[static_cast<uint>(Opcodes::Op_VecD)] = &spillVectD->out_RegMask();
  }
  if (Matcher::vector_size_supported(T_FLOAT,4)) {
    MachNode *spillVectX = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTX));
    idealreg2regmask[static_cast<uint>(Opcodes::Op_VecX)] = &spillVectX->out_RegMask();
  }
  if (Matcher::vector_size_supported(T_FLOAT,8)) {
    MachNode *spillVectY = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTY));
    idealreg2regmask[static_cast<uint>(Opcodes::Op_VecY)] = &spillVectY->out_RegMask();
  }
  if (Matcher::vector_size_supported(T_FLOAT,16)) {
    MachNode *spillVectZ = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTZ));
    idealreg2regmask[static_cast<uint>(Opcodes::Op_VecZ)] = &spillVectZ->out_RegMask();
  }
}
#ifdef ASSERT
static void match_alias_type(Compile* C, Node* n, Node* m) {
  if (!VerifyAliases)  return;  // do not go looking for trouble by default
  const TypePtr* nat = n->adr_type();
  const TypePtr* mat = m->adr_type();
  int nidx = C->get_alias_index(nat);
  int midx = C->get_alias_index(mat);
  // Detune the assert for cases like (AndI 0xFF (LoadB p)).
  if (nidx == Compile::AliasIdxTop && midx >= Compile::AliasIdxRaw) {
    for (uint i = 1; i < n->req(); i++) {
      Node* n1 = n->in(i);
      const TypePtr* n1at = n1->adr_type();
      if (n1at != NULL) {
        nat = n1at;
        nidx = C->get_alias_index(n1at);
      }
    }
  }
  // %%% Kludgery.  Instead, fix ideal adr_type methods for all these cases:
  if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxRaw) {
    switch (n->Opcode()) {
    case Opcodes::Op_PrefetchAllocation:
      nidx = Compile::AliasIdxRaw;
      nat = TypeRawPtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxRaw && midx == Compile::AliasIdxTop) {
    switch (n->Opcode()) {
    case Opcodes::Op_ClearArray:
      midx = Compile::AliasIdxRaw;
      mat = TypeRawPtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxBot) {
    switch (n->Opcode()) {
    case Opcodes::Op_Return:
    case Opcodes::Op_Rethrow:
    case Opcodes::Op_Halt:
    case Opcodes::Op_TailCall:
    case Opcodes::Op_TailJump:
      nidx = Compile::AliasIdxBot;
      nat = TypePtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxBot && midx == Compile::AliasIdxTop) {
    switch (n->Opcode()) {
    case Opcodes::Op_StrComp:
    case Opcodes::Op_StrEquals:
    case Opcodes::Op_StrIndexOf:
    case Opcodes::Op_StrIndexOfChar:
    case Opcodes::Op_AryEq:
    case Opcodes::Op_HasNegatives:
    case Opcodes::Op_MemBarVolatile:
    case Opcodes::Op_MemBarCPUOrder: // %%% these ideals should have narrower adr_type?
    case Opcodes::Op_StrInflatedCopy:
    case Opcodes::Op_StrCompressedCopy:
    case Opcodes::Op_OnSpinWait:
    case Opcodes::Op_EncodeISOArray:
      nidx = Compile::AliasIdxTop;
      nat = NULL;
      break;
    }
  }
  if (nidx != midx) {
    if (PrintOpto || (PrintMiscellaneous && (WizardMode || Verbose))) {
      tty->print_cr("==== Matcher alias shift %d => %d", nidx, midx);
      n->dump();
      m->dump();
    }
    assert(C->subsume_loads() && C->must_alias(nat, midx),
           "must not lose alias info when matching");
  }
}
#endif

//------------------------------xform------------------------------------------
// Given a Node in old-space, Match him (Label/Reduce) to produce a machine
// Node in new-space.  Given a new-space Node, recursively walk his children.
Node *Matcher::transform( Node *n ) { ShouldNotCallThis(); return n; }
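// xform walks the graph with an explicit stack rather than recursion.  Each
// MStack entry carries a node and its traversal state (Visit before the
// children are processed, Post_Visit afterwards), plus the parent node and
// input index so Post_Visit can wire the transformed child back into its
// parent; an index of -1 means "attach as a precedence edge via add_prec()".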
Node *Matcher::xform( Node *n, int max_stack ) {
  // Use one stack to keep both: child's node/state and parent's node/index
  MStack mstack(max_stack * 2 * 2); // usually: C->live_nodes() * 2 * 2
  mstack.push(n, Visit, NULL, -1);  // set NULL as parent to indicate root

  while (mstack.is_nonempty()) {
    C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions");
    if (C->failing()) return NULL;
    n = mstack.node();          // Leave node on stack
    Node_State nstate = mstack.state();
    if (nstate == Visit) {
      mstack.set_state(Post_Visit);
      Node *oldn = n;
      // Old-space or new-space check
      if (!C->node_arena()->contains(n)) {
        // Old space!
        Node* m;
        if (has_new_node(n)) {  // Already Label/Reduced on an earlier visit
          m = new_node(n);
        } else {
          if (!is_dontcare(n)) { // Matcher can match this guy
            // Calls match special.  They match alone with no children.
            // Their children, the incoming arguments, match normally.
            m = n->is_SafePoint() ? match_sfpt(n->as_SafePoint()):match_tree(n);
            if (C->failing())  return NULL;
            if (m == NULL) { Matcher::soft_match_failure(); return NULL; }
          } else {                  // Nothing the matcher cares about
            if( n->is_Proj() && n->in(0)->is_Multi()) {       // Projections?
              // Convert to machine-dependent projection
              m = n->in(0)->as_Multi()->match( n->as_Proj(), this );
#ifdef ASSERT
              _new2old_map.map(m->_idx, n);
#endif
              if (m->in(0) != NULL) // m might be top
                collect_null_checks(m, n);
            } else {                // Else just a regular 'ol guy
              m = n->clone();       // So just clone into new-space
#ifdef ASSERT
              _new2old_map.map(m->_idx, n);
#endif
              // Def-Use edges will be added incrementally as Uses
              // of this node are matched.
              assert(m->outcnt() == 0, "no Uses of this clone yet");
            }
          }

          set_new_node(n, m);       // Map old to new
          if (_old_node_note_array != NULL) {
            Node_Notes* nn = C->locate_node_notes(_old_node_note_array,
                                                  n->_idx);
            C->set_node_notes_at(m->_idx, nn);
          }
          debug_only(match_alias_type(C, n, m));
        }
        n = m;    // n is now a new-space node
        mstack.set_node(n);
      }

      // New space!
      if (_visited.test_set(n->_idx)) continue; // while(mstack.is_nonempty())

      int i;
      // Put precedence edges on stack first (match them last).
      for (i = oldn->req(); (uint)i < oldn->len(); i++) {
        Node *m = oldn->in(i);
        if (m == NULL) break;
        // set -1 to call add_prec() instead of set_req() during Step1
        mstack.push(m, Visit, n, -1);
      }

      // Handle precedence edges for interior nodes
      for (i = n->len()-1; (uint)i >= n->req(); i--) {
        Node *m = n->in(i);
        if (m == NULL || C->node_arena()->contains(m)) continue;
        n->rm_prec(i);
        // set -1 to call add_prec() instead of set_req() during Step1
        mstack.push(m, Visit, n, -1);
      }

      // For constant debug info, I'd rather have unmatched constants.
      int cnt = n->req();
      JVMState* jvms = n->jvms();
      int debug_cnt = jvms ? jvms->debug_start() : cnt;
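      // Inputs at or above debug_start() are JVM state (locals, expression
      // stack, monitors) rather than real data edges.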

      // Now do only debug info.  Clone constants rather than matching.
      // Constants are represented directly in the debug info without
      // the need for executable machine instructions.
      // Monitor boxes are also represented directly.
      for (i = cnt - 1; i >= debug_cnt; --i) { // For all debug inputs do
        Node *m = n->in(i);          // Get input
        Opcodes op = m->Opcode();
        assert((op == Opcodes::Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites");
        if( op == Opcodes::Op_ConI || op == Opcodes::Op_ConP || op == Opcodes::Op_ConN || op == Opcodes::Op_ConNKlass ||
            op == Opcodes::Op_ConF || op == Opcodes::Op_ConD || op == Opcodes::Op_ConL
            // || op == Op_BoxLock  // %%%% enable this and remove (+++) in chaitin.cpp
            ) {
          m = m->clone();
#ifdef ASSERT
          _new2old_map.map(m->_idx, n);
#endif
          mstack.push(m, Post_Visit, n, i); // Don't need to visit
          mstack.push(m->in(0), Visit, m, 0);
        } else {
          mstack.push(m, Visit, n, i);
        }
      }

      // And now walk his children, and convert his inputs to new-space.
      for( ; i >= 0; --i ) { // For all normal inputs do
        Node *m = n->in(i);  // Get input
        if(m != NULL)
          mstack.push(m, Visit, n, i);
      }

    }
    else if (nstate == Post_Visit) {
      // Set xformed input
      Node *p = mstack.parent();
      if (p != NULL) { // root doesn't have parent
        int i = (int)mstack.index();
        if (i >= 0)
          p->set_req(i, n); // required input
        else if (i == -1)
          p->add_prec(n);   // precedence input
        else
          ShouldNotReachHere();
      }
      mstack.pop(); // remove processed node from stack
    }
    else {
      ShouldNotReachHere();
    }
  } // while (mstack.is_nonempty())
  return n; // Return new-space Node
}

//------------------------------warp_outgoing_stk_arg------------------------
OptoReg::Name Matcher::warp_outgoing_stk_arg( VMReg reg, OptoReg::Name begin_out_arg_area, OptoReg::Name &out_arg_limit_per_call ) {
  // Convert outgoing argument location to a pre-biased stack offset
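  // E.g. (illustrative) with begin_out_arg_area == 20, an argument assigned
  // to stack slot 2 becomes OptoReg 22 and bumps out_arg_limit_per_call to 23.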
1112   if (reg->is_stack()) {
1113     OptoReg::Name warped = reg->reg2stack();
1114     // Adjust the stack slot offset to be the register number used
1115     // by the allocator.
1116     warped = OptoReg::add(begin_out_arg_area, warped);
1117     // Keep track of the largest numbered stack slot used for an arg.
1118     // Largest used slot per call-site indicates the amount of stack
1119     // that is killed by the call.
1120     if( warped >= out_arg_limit_per_call )
1121       out_arg_limit_per_call = OptoReg::add(warped,1);
1122     if (!RegMask::can_represent_arg(warped)) {
1123       C->record_method_not_compilable("unsupported calling sequence");
1124       return OptoReg::Bad;
1125     }
1126     return warped;
1127   }
1128   return OptoReg::as_OptoReg(reg);
1129 }
1130 
1131 
1132 //------------------------------match_sfpt-------------------------------------
1133 // Helper function to match call instructions.  Calls match special.
1134 // They match alone with no children.  Their children, the incoming
1135 // arguments, match normally.
1136 MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
1137   MachSafePointNode *msfpt = NULL;
1138   MachCallNode      *mcall = NULL;
1139   uint               cnt;
1140   // Split out case for SafePoint vs Call
1141   CallNode *call;
1142   const TypeTuple *domain;
1143   ciMethod*        method = NULL;
1144   bool             is_method_handle_invoke = false;  // for special kill effects
1145   if( sfpt->is_Call() ) {
1146     call = sfpt->as_Call();
1147     domain = call->tf()->domain();
1148     cnt = domain->cnt();
1149 
1150     // Match just the call, nothing else
1151     MachNode *m = match_tree(call);
1152     if (C->failing())  return NULL;
1153     if( m == NULL ) { Matcher::soft_match_failure(); return NULL; }
1154 
1155     // Copy data from the Ideal SafePoint to the machine version
1156     mcall = m->as_MachCall();
1157 
1158     mcall->set_tf(         call->tf());
1159     mcall->set_entry_point(call->entry_point());
1160     mcall->set_cnt(        call->cnt());
1161 
1162     if( mcall->is_MachCallJava() ) {
1163       MachCallJavaNode *mcall_java  = mcall->as_MachCallJava();
1164       const CallJavaNode *call_java =  call->as_CallJava();
1165       method = call_java->method();
1166       mcall_java->_method = method;
1167       mcall_java->_bci = call_java->_bci;
1168       mcall_java->_optimized_virtual = call_java->is_optimized_virtual();
1169       is_method_handle_invoke = call_java->is_method_handle_invoke();
1170       mcall_java->_method_handle_invoke = is_method_handle_invoke;
1171       mcall_java->_override_symbolic_info = call_java->override_symbolic_info();
1172       if (is_method_handle_invoke) {
1173         C->set_has_method_handle_invokes(true);
1174       }
1175       if( mcall_java->is_MachCallStaticJava() )
1176         mcall_java->as_MachCallStaticJava()->_name =
1177          call_java->as_CallStaticJava()->_name;
1178       if( mcall_java->is_MachCallDynamicJava() )
1179         mcall_java->as_MachCallDynamicJava()->_vtable_index =
1180          call_java->as_CallDynamicJava()->_vtable_index;
1181     }
1182     else if( mcall->is_MachCallRuntime() ) {
1183       mcall->as_MachCallRuntime()->_name = call->as_CallRuntime()->_name;
1184     }
1185     msfpt = mcall;
1186   }
1187   // This is a non-call safepoint
1188   else {
1189     call = NULL;
1190     domain = NULL;
1191     MachNode *mn = match_tree(sfpt);
1192     if (C->failing())  return NULL;
1193     msfpt = mn->as_MachSafePoint();
1194     cnt = TypeFunc::Parms;
1195   }
1196 
1197   // Advertise the correct memory effects (for anti-dependence computation).
1198   msfpt->set_adr_type(sfpt->adr_type());
1199 
1200   // Allocate a private array of RegMasks.  These RegMasks are not shared.
1201   msfpt->_in_rms = NEW_RESOURCE_ARRAY( RegMask, cnt );
1202   // Empty them all.
1203   memset( msfpt->_in_rms, 0, sizeof(RegMask)*cnt );
1204 
1205   // Do all the pre-defined non-Empty register masks
1206   msfpt->_in_rms[TypeFunc::ReturnAdr] = _return_addr_mask;
1207   msfpt->_in_rms[TypeFunc::FramePtr ] = c_frame_ptr_mask;
1208 
  // Compute the first place an outgoing argument can possibly be put.
1210   OptoReg::Name begin_out_arg_area = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
1211   assert( is_even(begin_out_arg_area), "" );
1212   // Compute max outgoing register number per call site.
1213   OptoReg::Name out_arg_limit_per_call = begin_out_arg_area;
1214   // Calls to C may hammer extra stack slots above and beyond any arguments.
1215   // These are usually backing store for register arguments for varargs.
1216   if( call != NULL && call->is_CallRuntime() )
1217     out_arg_limit_per_call = OptoReg::add(out_arg_limit_per_call,C->varargs_C_out_slots_killed());
1218 
1219 
1220   // Do the normal argument list (parameters) register masks
1221   int argcnt = cnt - TypeFunc::Parms;
1222   if( argcnt > 0 ) {          // Skip it all if we have no args
1223     BasicType *sig_bt  = NEW_RESOURCE_ARRAY( BasicType, argcnt );
1224     VMRegPair *parm_regs = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
1225     int i;
1226     for( i = 0; i < argcnt; i++ ) {
1227       sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
1228     }
1229     // V-call to pick proper calling convention
1230     call->calling_convention( sig_bt, parm_regs, argcnt );
1231 
1232 #ifdef ASSERT
1233     // Sanity check users' calling convention.  Really handy during
1234     // the initial porting effort.  Fairly expensive otherwise.
1235     { for (int i = 0; i<argcnt; i++) {
1236       if( !parm_regs[i].first()->is_valid() &&
1237           !parm_regs[i].second()->is_valid() ) continue;
1238       VMReg reg1 = parm_regs[i].first();
1239       VMReg reg2 = parm_regs[i].second();
1240       for (int j = 0; j < i; j++) {
1241         if( !parm_regs[j].first()->is_valid() &&
1242             !parm_regs[j].second()->is_valid() ) continue;
1243         VMReg reg3 = parm_regs[j].first();
1244         VMReg reg4 = parm_regs[j].second();
1245         if( !reg1->is_valid() ) {
1246           assert( !reg2->is_valid(), "valid halvsies" );
1247         } else if( !reg3->is_valid() ) {
1248           assert( !reg4->is_valid(), "valid halvsies" );
1249         } else {
1250           assert( reg1 != reg2, "calling conv. must produce distinct regs");
1251           assert( reg1 != reg3, "calling conv. must produce distinct regs");
1252           assert( reg1 != reg4, "calling conv. must produce distinct regs");
1253           assert( reg2 != reg3, "calling conv. must produce distinct regs");
1254           assert( reg2 != reg4 || !reg2->is_valid(), "calling conv. must produce distinct regs");
1255           assert( reg3 != reg4, "calling conv. must produce distinct regs");
1256         }
1257       }
1258     }
1259     }
1260 #endif
1261 
    // Visit each argument and compute its outgoing register mask.
    // An argument may span two register halves, so up to 2 bits can
    // be set.  Also compute the max over all outgoing arguments, both
    // per call-site and over the entire method.
1266     for( i = 0; i < argcnt; i++ ) {
1267       // Address of incoming argument mask to fill in
1268       RegMask *rm = &mcall->_in_rms[i+TypeFunc::Parms];
1269       if( !parm_regs[i].first()->is_valid() &&
1270           !parm_regs[i].second()->is_valid() ) {
1271         continue;               // Avoid Halves
1272       }
1273       // Grab first register, adjust stack slots and insert in mask.
1274       OptoReg::Name reg1 = warp_outgoing_stk_arg(parm_regs[i].first(), begin_out_arg_area, out_arg_limit_per_call );
1275       if (OptoReg::is_valid(reg1))
1276         rm->Insert( reg1 );
1277       // Grab second register (if any), adjust stack slots and insert in mask.
1278       OptoReg::Name reg2 = warp_outgoing_stk_arg(parm_regs[i].second(), begin_out_arg_area, out_arg_limit_per_call );
1279       if (OptoReg::is_valid(reg2))
1280         rm->Insert( reg2 );
1281     } // End of for all arguments
1282 
1283     // Compute number of stack slots needed to restore stack in case of
1284     // Pascal-style argument popping.
1285     mcall->_argsize = out_arg_limit_per_call - begin_out_arg_area;
1286   }
1287 
1288   // Compute the max stack slot killed by any call.  These will not be
1289   // available for debug info, and will be used to adjust FIRST_STACK_mask
1290   // after all call sites have been visited.
1291   if( _out_arg_limit < out_arg_limit_per_call)
1292     _out_arg_limit = out_arg_limit_per_call;
1293 
1294   if (mcall) {
1295     // Kill the outgoing argument area, including any non-argument holes and
1296     // any legacy C-killed slots.  Use Fat-Projections to do the killing.
1297     // Since the max-per-method covers the max-per-call-site and debug info
1298     // is excluded on the max-per-method basis, debug info cannot land in
1299     // this killed area.
1300     uint r_cnt = mcall->tf()->range()->cnt();
1301     MachProjNode *proj = new MachProjNode( mcall, r_cnt+10000, RegMask::Empty, static_cast<Opcodes>(MachProjNode::projType::fat_proj) );
1302     if (!RegMask::can_represent_arg(OptoReg::Name(out_arg_limit_per_call-1))) {
1303       C->record_method_not_compilable("unsupported outgoing calling sequence");
1304     } else {
1305       for (int i = begin_out_arg_area; i < out_arg_limit_per_call; i++)
1306         proj->_rout.Insert(OptoReg::Name(i));
1307     }
1308     if (proj->_rout.is_NotEmpty()) {
1309       push_projection(proj);
1310     }
1311   }
1312   // Transfer the safepoint information from the call to the mcall
1313   // Move the JVMState list
1314   msfpt->set_jvms(sfpt->jvms());
1315   for (JVMState* jvms = msfpt->jvms(); jvms; jvms = jvms->caller()) {
1316     jvms->set_map(sfpt);
1317   }
1318 
1319   // Debug inputs begin just after the last incoming parameter
1320   assert((mcall == NULL) || (mcall->jvms() == NULL) ||
1321          (mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain()->cnt()), "");
1322 
1323   // Move the OopMap
1324   msfpt->_oop_map = sfpt->_oop_map;
1325 
1326   // Add additional edges.
1327   if (msfpt->mach_constant_base_node_input() != (uint)-1 && !msfpt->is_MachCallLeaf()) {
    // For these calls we cannot add MachConstantBase in expand(), as the
    // ins are not complete at that point.
1330     msfpt->ins_req(msfpt->mach_constant_base_node_input(), C->mach_constant_base_node());
1331     if (msfpt->jvms() &&
1332         msfpt->mach_constant_base_node_input() <= msfpt->jvms()->debug_start() + msfpt->_jvmadj) {
1333       // We added an edge before jvms, so we must adapt the position of the ins.
1334       msfpt->jvms()->adapt_position(+1);
1335     }
1336   }
1337 
1338   // Registers killed by the call are set in the local scheduling pass
1339   // of Global Code Motion.
1340   return msfpt;
1341 }
1342 
1343 //---------------------------match_tree----------------------------------------
// Match an Ideal Node DAG: turn it into a tree, then Label & Reduce.  Used
// as part of the wholesale conversion from Ideal to Mach Nodes.  Also used
// for making GotoNodes while building the CFG, and in init_spill_mask() to
// identify a Load's result RegMask for memoization in idealreg2regmask[].
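// Rough sketch of the two phases: Label_Root walks the tree bottom-up,
// using the ADLC-generated DFA to record, per node, every rule that could
// produce it together with that rule's cost; ReduceInst then rewrites the
// tree top-down along the cheapest rules.  E.g., (AddI (LoadI mem adr) reg)
// may reduce to one add-from-memory MachNode if the AD file has such a rule.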
1348 MachNode *Matcher::match_tree( const Node *n ) {
1349   assert( n->Opcode() != Opcodes::Op_Phi, "cannot match" );
1350   assert( !n->is_block_start(), "cannot match" );
  // Set the mark for all locally allocated State objects.
  // When this call returns, _states_arena will be reset,
  // freeing all State objects.
1354   ResourceMark rm( &_states_arena );
1355 
1356   LabelRootDepth = 0;
1357 
1358   // StoreNodes require their Memory input to match any LoadNodes
1359   Node *mem = n->is_Store() ? n->in(MemNode::Memory) : (Node*)1 ;
1360 #ifdef ASSERT
1361   Node* save_mem_node = _mem_node;
1362   _mem_node = n->is_Store() ? (Node*)n : NULL;
1363 #endif
1364   // State object for root node of match tree
1365   // Allocate it on _states_arena - stack allocation can cause stack overflow.
1366   State *s = new (&_states_arena) State;
1367   s->_kids[0] = NULL;
1368   s->_kids[1] = NULL;
1369   s->_leaf = (Node*)n;
1370   // Label the input tree, allocating labels from top-level arena
1371   Label_Root( n, s, n->in(0), mem );
1372   if (C->failing())  return NULL;
1373 
1374   // The minimum cost match for the whole tree is found at the root State
1375   uint mincost = max_juint;
1376   uint cost = max_juint;
1377   uint i;
1378   for( i = 0; i < NUM_OPERANDS; i++ ) {
1379     if( s->valid(i) &&                // valid entry and
1380         s->_cost[i] < cost &&         // low cost and
1381         s->_rule[i] >= NUM_OPERANDS ) // not an operand
1382       cost = s->_cost[mincost=i];
1383   }
1384   if (mincost == max_juint) {
1385 #ifndef PRODUCT
1386     tty->print("No matching rule for:");
1387     s->dump();
1388 #endif
1389     Matcher::soft_match_failure();
1390     return NULL;
1391   }
1392   // Reduce input tree based upon the state labels to machine Nodes
1393   MachNode *m = ReduceInst( s, s->_rule[mincost], mem );
1394 #ifdef ASSERT
1395   _old2new_map.map(n->_idx, m);
1396   _new2old_map.map(m->_idx, (Node*)n);
1397 #endif
1398 
1399   // Add any Matcher-ignored edges
1400   uint cnt = n->req();
1401   uint start = 1;
1402   if( mem != (Node*)1 ) start = MemNode::Memory+1;
1403   if( n->is_AddP() ) {
1404     assert( mem == (Node*)1, "" );
1405     start = AddPNode::Base+1;
1406   }
1407   for( i = start; i < cnt; i++ ) {
1408     if( !n->match_edge(i) ) {
1409       if( i < m->req() )
1410         m->ins_req( i, n->in(i) );
1411       else
1412         m->add_req( n->in(i) );
1413     }
1414   }
1415 
1416   debug_only( _mem_node = save_mem_node; )
1417   return m;
1418 }
1419 
1420 
1421 //------------------------------match_into_reg---------------------------------
// Choose to either match this Node in a register or as part of the current
// match tree.  Return true to require a register and false to match it as
// part of the current match tree.
1425 static bool match_into_reg( const Node *n, Node *m, Node *control, int i, bool shared ) {
1426 
1427   const Type *t = m->bottom_type();
1428 
1429   if (t->singleton()) {
1430     // Never force constants into registers.  Allow them to match as
1431     // constants or registers.  Copies of the same value will share
1432     // the same register.  See find_shared_node.
1433     return false;
1434   } else {                      // Not a constant
1435     // Stop recursion if they have different Controls.
1436     Node* m_control = m->in(0);
    // The control of a load's memory can post-dominate the load's control,
    // so use it, since the load can't float above its memory.
1439     Node* mem_control = (m->is_Load()) ? m->in(MemNode::Memory)->in(0) : NULL;
1440     if (control && m_control && control != m_control && control != mem_control) {
1441 
1442       // Actually, we can live with the most conservative control we
1443       // find, if it post-dominates the others.  This allows us to
1444       // pick up load/op/store trees where the load can float a little
1445       // above the store.
1446       Node *x = control;
1447       const uint max_scan = 6;  // Arbitrary scan cutoff
1448       uint j;
1449       for (j=0; j<max_scan; j++) {
1450         if (x->is_Region())     // Bail out at merge points
1451           return true;
1452         x = x->in(0);
1453         if (x == m_control)     // Does 'control' post-dominate
1454           break;                // m->in(0)?  If so, we can use it
1455         if (x == mem_control)   // Does 'control' post-dominate
1456           break;                // mem_control?  If so, we can use it
1457       }
1458       if (j == max_scan)        // No post-domination before scan end?
1459         return true;            // Then break the match tree up
1460     }
1461     if ((m->is_DecodeN() && Matcher::narrow_oop_use_complex_address()) ||
1462         (m->is_DecodeNKlass() && Matcher::narrow_klass_use_complex_address())) {
1463       // These are commonly used in address expressions and can
1464       // efficiently fold into them on X64 in some cases.
1465       return false;
1466     }
1467   }
1468 
  // Not a case of forceable cloning.  If shared, put it into a register.
1470   return shared;
1471 }
1472 
1473 
1474 //------------------------------Instruction Selection--------------------------
1475 // Label method walks a "tree" of nodes, using the ADLC generated DFA to match
1476 // ideal nodes to machine instructions.  Trees are delimited by shared Nodes,
1477 // things the Matcher does not match (e.g., Memory), and things with different
1478 // Controls (hence forced into different blocks).  We pass in the Control
1479 // selected for this entire State tree.
1480 
1481 // The Matcher works on Trees, but an Intel add-to-memory requires a DAG: the
1482 // Store and the Load must have identical Memories (as well as identical
1483 // pointers).  Since the Matcher does not have anything for Memory (and
1484 // does not handle DAGs), I have to match the Memory input myself.  If the
1485 // Tree root is a Store, I require all Loads to have the identical memory.
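// For example (illustrative), an Intel add-to-memory covers the DAG
//   (StoreI mem adr (AddI (LoadI mem adr) v))
// where the StoreI and the LoadI must see the same Memory node, which is
// exactly the condition checked below via 'mem' and 'input_mem'.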
1486 Node *Matcher::Label_Root( const Node *n, State *svec, Node *control, const Node *mem){
  // Since Label_Root is a recursive function, it's possible that we might
  // run out of stack space.  See bugs 6272980 & 6227033 for more info.
1489   LabelRootDepth++;
1490   if (LabelRootDepth > MaxLabelRootDepth) {
1491     C->record_method_not_compilable("Out of stack space, increase MaxLabelRootDepth");
1492     return NULL;
1493   }
1494   uint care = 0;                // Edges matcher cares about
1495   uint cnt = n->req();
1496   uint i = 0;
1497 
1498   // Examine children for memory state
1499   // Can only subsume a child into your match-tree if that child's memory state
1500   // is not modified along the path to another input.
1501   // It is unsafe even if the other inputs are separate roots.
1502   Node *input_mem = NULL;
1503   for( i = 1; i < cnt; i++ ) {
1504     if( !n->match_edge(i) ) continue;
1505     Node *m = n->in(i);         // Get ith input
1506     assert( m, "expect non-null children" );
1507     if( m->is_Load() ) {
1508       if( input_mem == NULL ) {
1509         input_mem = m->in(MemNode::Memory);
1510       } else if( input_mem != m->in(MemNode::Memory) ) {
1511         input_mem = NodeSentinel;
1512       }
1513     }
1514   }
1515 
  for( i = 1; i < cnt; i++ ) { // For my children
1517     if( !n->match_edge(i) ) continue;
1518     Node *m = n->in(i);         // Get ith input
1519     // Allocate states out of a private arena
1520     State *s = new (&_states_arena) State;
1521     svec->_kids[care++] = s;
1522     assert( care <= 2, "binary only for now" );
1523 
1524     // Recursively label the State tree.
1525     s->_kids[0] = NULL;
1526     s->_kids[1] = NULL;
1527     s->_leaf = m;
1528 
    // Check for leaves of the State Tree; things that cannot be a part of
    // the current tree.  If we find any, that value is matched as a
    // register operand.  Otherwise, normal matching is used.
1532     if( match_into_reg(n, m, control, i, is_shared(m)) ||
1533         //
        // Stop recursion if this is a LoadNode, the root of this tree is a
        // StoreNode, and the load & store have different memories.
1536         ((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem) ||
1537         // Can NOT include the match of a subtree when its memory state
1538         // is used by any of the other subtrees
1539         (input_mem == NodeSentinel) ) {
1540       // Print when we exclude matching due to different memory states at input-loads
1541       if (PrintOpto && (Verbose && WizardMode) && (input_mem == NodeSentinel)
1542         && !((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem)) {
1543         tty->print_cr("invalid input_mem");
1544       }
1545       // Switch to a register-only opcode; this value must be in a register
1546       // and cannot be subsumed as part of a larger instruction.
1547       s->DFA( m->ideal_reg(), m );
1548 
1549     } else {
1550       // If match tree has no control and we do, adopt it for entire tree
1551       if( control == NULL && m->in(0) != NULL && m->req() > 1 )
1552         control = m->in(0);         // Pick up control
1553       // Else match as a normal part of the match tree.
1554       control = Label_Root(m,s,control,mem);
1555       if (C->failing()) return NULL;
1556     }
1557   }
1558 
1559 
1560   // Call DFA to match this node, and return
1561   svec->DFA( n->Opcode(), n );
1562 
1563 #ifdef ASSERT
1564   uint x;
1565   for( x = 0; x < _LAST_MACH_OPER; x++ )
1566     if( svec->valid(x) )
1567       break;
1568 
1569   if (x >= _LAST_MACH_OPER) {
1570     n->dump();
1571     svec->dump();
1572     assert( false, "bad AD file" );
1573   }
1574 #endif
1575   return control;
1576 }
1577 
1578 
// Con nodes reduced using the same rule can share their MachNode,
// which reduces the number of copies of a constant in the final
// program.  The register allocator is free to split the live range
// later by splitting uses.
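// E.g. (hypothetical rule name): two uses of the same ConI that both reduce
// via a loadConI-style rule share one MachNode here; the allocator can still
// split that live range later if the sharing hurts register pressure.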
1583 MachNode* Matcher::find_shared_node(Node* leaf, uint rule) {
1584   if (!leaf->is_Con() && !leaf->is_DecodeNarrowPtr()) return NULL;
1585 
1586   // See if this Con has already been reduced using this rule.
1587   if (_shared_nodes.Size() <= leaf->_idx) return NULL;
1588   MachNode* last = (MachNode*)_shared_nodes.at(leaf->_idx);
1589   if (last != NULL && rule == last->rule()) {
1590     // Don't expect control change for DecodeN
1591     if (leaf->is_DecodeNarrowPtr())
1592       return last;
1593     // Get the new space root.
1594     Node* xroot = new_node(C->root());
1595     if (xroot == NULL) {
      // This shouldn't happen given the order of matching.
1597       return NULL;
1598     }
1599 
1600     // Shared constants need to have their control be root so they
1601     // can be scheduled properly.
1602     Node* control = last->in(0);
1603     if (control != xroot) {
1604       if (control == NULL || control == C->root()) {
1605         last->set_req(0, xroot);
1606       } else {
1607         assert(false, "unexpected control");
1608         return NULL;
1609       }
1610     }
1611     return last;
1612   }
1613   return NULL;
1614 }
1615 
1616 
1617 //------------------------------ReduceInst-------------------------------------
1618 // Reduce a State tree (with given Control) into a tree of MachNodes.
// This routine (and its cohort ReduceOper) converts Ideal Nodes into
1620 // complicated machine Nodes.  Each MachNode covers some tree of Ideal Nodes.
1621 // Each MachNode has a number of complicated MachOper operands; each
1622 // MachOper also covers a further tree of Ideal Nodes.
1623 
1624 // The root of the Ideal match tree is always an instruction, so we enter
1625 // the recursion here.  After building the MachNode, we need to recurse
1626 // the tree checking for these cases:
1627 // (1) Child is an instruction -
1628 //     Build the instruction (recursively), add it as an edge.
1629 //     Build a simple operand (register) to hold the result of the instruction.
1630 // (2) Child is an interior part of an instruction -
1631 //     Skip over it (do nothing)
// (3) Child is the start of an operand -
1633 //     Build the operand, place it inside the instruction
1634 //     Call ReduceOper.
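// Illustrative shape of the result (assuming the AD file has an
// add-with-memory rule): reducing (AddI (LoadI mem adr) reg) yields one
// MachNode whose _opnds[] holds a result operand, a memory MachOper
// covering the LoadI subtree, and a register MachOper; the LoadI itself
// disappears into the memory operand.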
1635 MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
1636   assert( rule >= NUM_OPERANDS, "called with operand rule" );
1637 
1638   MachNode* shared_node = find_shared_node(s->_leaf, rule);
1639   if (shared_node != NULL) {
1640     return shared_node;
1641   }
1642 
1643   // Build the object to represent this state & prepare for recursive calls
1644   MachNode *mach = s->MachNodeGenerator(rule);
1645   mach->_opnds[0] = s->MachOperGenerator(_reduceOp[rule]);
1646   assert( mach->_opnds[0] != NULL, "Missing result operand" );
1647   Node *leaf = s->_leaf;
1648   // Check for instruction or instruction chain rule
1649   if( rule >= _END_INST_CHAIN_RULE || rule < _BEGIN_INST_CHAIN_RULE ) {
1650     assert(C->node_arena()->contains(s->_leaf) || !has_new_node(s->_leaf),
1651            "duplicating node that's already been matched");
1652     // Instruction
1653     mach->add_req( leaf->in(0) ); // Set initial control
1654     // Reduce interior of complex instruction
1655     ReduceInst_Interior( s, rule, mem, mach, 1 );
1656   } else {
1657     // Instruction chain rules are data-dependent on their inputs
1658     mach->add_req(0);             // Set initial control to none
1659     ReduceInst_Chain_Rule( s, rule, mem, mach );
1660   }
1661 
1662   // If a Memory was used, insert a Memory edge
1663   if( mem != (Node*)1 ) {
1664     mach->ins_req(MemNode::Memory,mem);
1665 #ifdef ASSERT
1666     // Verify adr type after matching memory operation
1667     const MachOper* oper = mach->memory_operand();
1668     if (oper != NULL && oper != (MachOper*)-1) {
1669       // It has a unique memory operand.  Find corresponding ideal mem node.
1670       Node* m = NULL;
1671       if (leaf->is_Mem()) {
1672         m = leaf;
1673       } else {
1674         m = _mem_node;
1675         assert(m != NULL && m->is_Mem(), "expecting memory node");
1676       }
1677       const Type* mach_at = mach->adr_type();
      // A DecodeN node consumed by an address may have a different type
      // than its input.  Don't compare types in that case.
1680       if (m->adr_type() != mach_at &&
1681           (m->in(MemNode::Address)->is_DecodeNarrowPtr() ||
1682            m->in(MemNode::Address)->is_AddP() &&
1683            m->in(MemNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr() ||
1684            m->in(MemNode::Address)->is_AddP() &&
1685            m->in(MemNode::Address)->in(AddPNode::Address)->is_AddP() &&
1686            m->in(MemNode::Address)->in(AddPNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr())) {
1687         mach_at = m->adr_type();
1688       }
1689       if (m->adr_type() != mach_at) {
1690         m->dump();
1691         tty->print_cr("mach:");
1692         mach->dump(1);
1693       }
1694       assert(m->adr_type() == mach_at, "matcher should not change adr type");
1695     }
1696 #endif
1697   }
1698 
1699   // If the _leaf is an AddP, insert the base edge
1700   if (leaf->is_AddP()) {
1701     mach->ins_req(AddPNode::Base,leaf->in(AddPNode::Base));
1702   }
1703 
1704   uint number_of_projections_prior = number_of_projections();
1705 
1706   // Perform any 1-to-many expansions required
1707   MachNode *ex = mach->Expand(s, _projection_list, mem);
1708   if (ex != mach) {
1709     assert(ex->ideal_reg() == mach->ideal_reg(), "ideal types should match");
1710     if( ex->in(1)->is_Con() )
1711       ex->in(1)->set_req(0, C->root());
1712     // Remove old node from the graph
1713     for( uint i=0; i<mach->req(); i++ ) {
1714       mach->set_req(i,NULL);
1715     }
1716 #ifdef ASSERT
1717     _new2old_map.map(ex->_idx, s->_leaf);
1718 #endif
1719   }
1720 
  // PhaseChaitin::fixup_spills will sometimes generate spill code
  // via the matcher.  By that time, nodes have been wired into the CFG,
  // so any further nodes generated by expand rules would be left hanging
  // in space and would not get emitted as output code.  Catch this.
1725   // Also, catch any new register allocation constraints ("projections")
1726   // generated belatedly during spill code generation.
1727   if (_allocation_started) {
1728     guarantee(ex == mach, "no expand rules during spill generation");
1729     guarantee(number_of_projections_prior == number_of_projections(), "no allocation during spill generation");
1730   }
1731 
1732   if (leaf->is_Con() || leaf->is_DecodeNarrowPtr()) {
1733     // Record the con for sharing
1734     _shared_nodes.map(leaf->_idx, ex);
1735   }
1736 
1737   return ex;
1738 }
1739 
1740 void Matcher::handle_precedence_edges(Node* n, MachNode *mach) {
1741   for (uint i = n->req(); i < n->len(); i++) {
1742     if (n->in(i) != NULL) {
1743       mach->add_prec(n->in(i));
1744     }
1745   }
1746 }
1747 
1748 void Matcher::ReduceInst_Chain_Rule( State *s, int rule, Node *&mem, MachNode *mach ) {
1749   // 'op' is what I am expecting to receive
1750   int op = _leftOp[rule];
  // Operand type to catch the child's result.
1752   // This is what my child will give me.
1753   int opnd_class_instance = s->_rule[op];
1754   // Choose between operand class or not.
1755   // This is what I will receive.
1756   int catch_op = (FIRST_OPERAND_CLASS <= op && op < NUM_OPERANDS) ? opnd_class_instance : op;
1757   // New rule for child.  Chase operand classes to get the actual rule.
1758   int newrule = s->_rule[catch_op];
1759 
1760   if( newrule < NUM_OPERANDS ) {
1761     // Chain from operand or operand class, may be output of shared node
1762     assert( 0 <= opnd_class_instance && opnd_class_instance < NUM_OPERANDS,
1763             "Bad AD file: Instruction chain rule must chain from operand");
1764     // Insert operand into array of operands for this instruction
1765     mach->_opnds[1] = s->MachOperGenerator(opnd_class_instance);
1766 
1767     ReduceOper( s, newrule, mem, mach );
1768   } else {
1769     // Chain from the result of an instruction
1770     assert( newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand");
1771     mach->_opnds[1] = s->MachOperGenerator(_reduceOp[catch_op]);
1772     Node *mem1 = (Node*)1;
1773     debug_only(Node *save_mem_node = _mem_node;)
1774     mach->add_req( ReduceInst(s, newrule, mem1) );
1775     debug_only(_mem_node = save_mem_node;)
1776   }
1777   return;
1778 }
1779 
1780 
1781 uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds ) {
1782   handle_precedence_edges(s->_leaf, mach);
1783 
1784   if( s->_leaf->is_Load() ) {
1785     Node *mem2 = s->_leaf->in(MemNode::Memory);
1786     assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" );
1787     debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;)
1788     mem = mem2;
1789   }
1790   if( s->_leaf->in(0) != NULL && s->_leaf->req() > 1) {
1791     if( mach->in(0) == NULL )
1792       mach->set_req(0, s->_leaf->in(0));
1793   }
1794 
1795   // Now recursively walk the state tree & add operand list.
1796   for( uint i=0; i<2; i++ ) {   // binary tree
1797     State *newstate = s->_kids[i];
1798     if( newstate == NULL ) break;      // Might only have 1 child
1799     // 'op' is what I am expecting to receive
1800     int op;
1801     if( i == 0 ) {
1802       op = _leftOp[rule];
1803     } else {
1804       op = _rightOp[rule];
1805     }
    // Operand type to catch the child's result.
1807     // This is what my child will give me.
1808     int opnd_class_instance = newstate->_rule[op];
1809     // Choose between operand class or not.
1810     // This is what I will receive.
1811     int catch_op = (op >= FIRST_OPERAND_CLASS && op < NUM_OPERANDS) ? opnd_class_instance : op;
1812     // New rule for child.  Chase operand classes to get the actual rule.
1813     int newrule = newstate->_rule[catch_op];
1814 
1815     if( newrule < NUM_OPERANDS ) { // Operand/operandClass or internalOp/instruction?
1816       // Operand/operandClass
1817       // Insert operand into array of operands for this instruction
1818       mach->_opnds[num_opnds++] = newstate->MachOperGenerator(opnd_class_instance);
1819       ReduceOper( newstate, newrule, mem, mach );
1820 
1821     } else {                    // Child is internal operand or new instruction
1822       if( newrule < _LAST_MACH_OPER ) { // internal operand or instruction?
1823         // internal operand --> call ReduceInst_Interior
1824         // Interior of complex instruction.  Do nothing but recurse.
1825         num_opnds = ReduceInst_Interior( newstate, newrule, mem, mach, num_opnds );
1826       } else {
1827         // instruction --> call build operand(  ) to catch result
1828         //             --> ReduceInst( newrule )
1829         mach->_opnds[num_opnds++] = s->MachOperGenerator(_reduceOp[catch_op]);
1830         Node *mem1 = (Node*)1;
1831         debug_only(Node *save_mem_node = _mem_node;)
1832         mach->add_req( ReduceInst( newstate, newrule, mem1 ) );
1833         debug_only(_mem_node = save_mem_node;)
1834       }
1835     }
1836     assert( mach->_opnds[num_opnds-1], "" );
1837   }
1838   return num_opnds;
1839 }
1840 
1841 // This routine walks the interior of possible complex operands.
1842 // At each point we check our children in the match tree:
1843 // (1) No children -
1844 //     We are a leaf; add _leaf field as an input to the MachNode
1845 // (2) Child is an internal operand -
//     Skip over it (do nothing)
1847 // (3) Child is an instruction -
//     Call ReduceInst recursively and add the resulting
//     instruction as an input to the MachNode
1850 void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
1851   assert( rule < _LAST_MACH_OPER, "called with operand rule" );
1852   State *kid = s->_kids[0];
1853   assert( kid == NULL || s->_leaf->in(0) == NULL, "internal operands have no control" );
1854 
1855   // Leaf?  And not subsumed?
1856   if( kid == NULL && !_swallowed[rule] ) {
1857     mach->add_req( s->_leaf );  // Add leaf pointer
1858     return;                     // Bail out
1859   }
1860 
1861   if( s->_leaf->is_Load() ) {
1862     assert( mem == (Node*)1, "multiple Memories being matched at once?" );
1863     mem = s->_leaf->in(MemNode::Memory);
1864     debug_only(_mem_node = s->_leaf;)
1865   }
1866 
1867   handle_precedence_edges(s->_leaf, mach);
1868 
1869   if( s->_leaf->in(0) && s->_leaf->req() > 1) {
1870     if( !mach->in(0) )
1871       mach->set_req(0,s->_leaf->in(0));
1872     else {
1873       assert( s->_leaf->in(0) == mach->in(0), "same instruction, differing controls?" );
1874     }
1875   }
1876 
1877   for( uint i=0; kid != NULL && i<2; kid = s->_kids[1], i++ ) {   // binary tree
1878     int newrule;
1879     if( i == 0)
1880       newrule = kid->_rule[_leftOp[rule]];
1881     else
1882       newrule = kid->_rule[_rightOp[rule]];
1883 
1884     if( newrule < _LAST_MACH_OPER ) { // Operand or instruction?
1885       // Internal operand; recurse but do nothing else
1886       ReduceOper( kid, newrule, mem, mach );
1887 
1888     } else {                    // Child is a new instruction
1889       // Reduce the instruction, and add a direct pointer from this
1890       // machine instruction to the newly reduced one.
1891       Node *mem1 = (Node*)1;
1892       debug_only(Node *save_mem_node = _mem_node;)
1893       mach->add_req( ReduceInst( kid, newrule, mem1 ) );
1894       debug_only(_mem_node = save_mem_node;)
1895     }
1896   }
1897 }
1898 
1899 
1900 // -------------------------------------------------------------------------
1901 // Java-Java calling convention
1902 // (what you use when Java calls Java)
1903 
1904 //------------------------------find_receiver----------------------------------
1905 // For a given signature, return the OptoReg for parameter 0.
1906 OptoReg::Name Matcher::find_receiver( bool is_outgoing ) {
1907   VMRegPair regs;
1908   BasicType sig_bt = T_OBJECT;
1909   calling_convention(&sig_bt, &regs, 1, is_outgoing);
1910   // Return argument 0 register.  In the LP64 build pointers
1911   // take 2 registers, but the VM wants only the 'main' name.
1912   return OptoReg::as_OptoReg(regs.first());
1913 }
1914 
// This function identifies sub-graphs in which a 'load' node is
// input to two different nodes, such that the sub-graph can be matched
// with BMI instructions like blsi, blsr, etc.
// Example: b = -a[i] & a[i] can be matched to blsi r32, m32.
1919 // The graph is (AndL (SubL Con0 LoadL*) LoadL*), where LoadL*
1920 // refers to the same node.
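// Likewise (from the match() calls in is_bmi_pattern below):
// b = a[i] & (a[i] - 1) fits blsr, and b = a[i] ^ (a[i] - 1) fits blsmsk,
// in both the 32-bit (LoadI/ConI) and 64-bit (LoadL/ConL) flavors.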
1921 #ifdef X86
1922 // Match the generic fused operations pattern (op1 (op2 Con{ConType} mop) mop)
1923 // This is a temporary solution until we make DAGs expressible in ADL.
1924 template<typename ConType>
1925 class FusedPatternMatcher {
1926   Node* _op1_node;
1927   Node* _mop_node;
1928   Opcodes _con_op;
1929 
1930   static int match_next(Node* n, Opcodes next_op, int next_op_idx) {
1931     if (n->in(1) == NULL || n->in(2) == NULL) {
1932       return -1;
1933     }
1934 
1935     if (next_op_idx == -1) { // n is commutative, try rotations
1936       if (n->in(1)->Opcode() == next_op) {
1937         return 1;
1938       } else if (n->in(2)->Opcode() == next_op) {
1939         return 2;
1940       }
1941     } else {
1942       assert(next_op_idx > 0 && next_op_idx <= 2, "Bad argument index");
1943       if (n->in(next_op_idx)->Opcode() == next_op) {
1944         return next_op_idx;
1945       }
1946     }
1947     return -1;
1948   }
1949 public:
1950   FusedPatternMatcher(Node* op1_node, Node *mop_node, Opcodes con_op) :
1951     _op1_node(op1_node), _mop_node(mop_node), _con_op(con_op) { }
1952 
1953   bool match(Opcodes op1, int op1_op2_idx,  // op1 and the index of the op1->op2 edge, -1 if op1 is commutative
1954              Opcodes op2, int op2_con_idx,  // op2 and the index of the op2->con edge, -1 if op2 is commutative
1955              typename ConType::NativeType con_value) {
1956     if (_op1_node->Opcode() != op1) {
1957       return false;
1958     }
1959     if (_mop_node->outcnt() > 2) {
1960       return false;
1961     }
1962     op1_op2_idx = match_next(_op1_node, op2, op1_op2_idx);
1963     if (op1_op2_idx == -1) {
1964       return false;
1965     }
1966     // Memory operation must be the other edge
1967     int op1_mop_idx = (op1_op2_idx & 1) + 1;
1968 
1969     // Check that the mop node is really what we want
1970     if (_op1_node->in(op1_mop_idx) == _mop_node) {
1971       Node *op2_node = _op1_node->in(op1_op2_idx);
1972       if (op2_node->outcnt() > 1) {
1973         return false;
1974       }
1975       assert(op2_node->Opcode() == op2, "Should be");
1976       op2_con_idx = match_next(op2_node, _con_op, op2_con_idx);
1977       if (op2_con_idx == -1) {
1978         return false;
1979       }
1980       // Memory operation must be the other edge
1981       int op2_mop_idx = (op2_con_idx & 1) + 1;
1982       // Check that the memory operation is the same node
1983       if (op2_node->in(op2_mop_idx) == _mop_node) {
1984         // Now check the constant
1985         const Type* con_type = op2_node->in(op2_con_idx)->bottom_type();
1986         if (con_type != Type::TOP && ConType::as_self(con_type)->get_con() == con_value) {
1987           return true;
1988         }
1989       }
1990     }
1991     return false;
1992   }
1993 };
1994 
1995 
1996 bool Matcher::is_bmi_pattern(Node *n, Node *m) {
1997   if (n != NULL && m != NULL) {
1998     if (m->Opcode() == Opcodes::Op_LoadI) {
1999       FusedPatternMatcher<TypeInt> bmii(n, m, Opcodes::Op_ConI);
2000       return bmii.match(Opcodes::Op_AndI, -1, Opcodes::Op_SubI,  1,  0)  ||
2001              bmii.match(Opcodes::Op_AndI, -1, Opcodes::Op_AddI, -1, -1)  ||
2002              bmii.match(Opcodes::Op_XorI, -1, Opcodes::Op_AddI, -1, -1);
2003     } else if (m->Opcode() == Opcodes::Op_LoadL) {
2004       FusedPatternMatcher<TypeLong> bmil(n, m, Opcodes::Op_ConL);
2005       return bmil.match(Opcodes::Op_AndL, -1, Opcodes::Op_SubL,  1,  0) ||
2006              bmil.match(Opcodes::Op_AndL, -1, Opcodes::Op_AddL, -1, -1) ||
2007              bmil.match(Opcodes::Op_XorL, -1, Opcodes::Op_AddL, -1, -1);
2008     }
2009   }
2010   return false;
2011 }
2012 #endif // X86
2013 
2014 bool Matcher::clone_base_plus_offset_address(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
2015   Node *off = m->in(AddPNode::Offset);
2016   if (off->is_Con()) {
2017     address_visited.test_set(m->_idx); // Flag as address_visited
2018     mstack.push(m->in(AddPNode::Address), Pre_Visit);
2019     // Clone X+offset as it also folds into most addressing expressions
2020     mstack.push(off, Visit);
2021     mstack.push(m->in(AddPNode::Base), Pre_Visit);
2022     return true;
2023   }
2024   return false;
2025 }
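
// E.g. (illustrative): an AddP whose Offset input is a constant (say #8)
// and which feeds several memory ops is cloned at each use, so the constant
// offset folds into each instruction's addressing mode instead of tying up
// a register on a shared AddP.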
2026 
// A method-klass-holder may be passed in the inline_cache_reg
// and then expanded into the inline_cache_reg and a method_oop register
// (defined in ad_<arch>.cpp).
2030 
2031 //------------------------------find_shared------------------------------------
2032 // Set bits if Node is shared or otherwise a root
2033 void Matcher::find_shared( Node *n ) {
2034   // Allocate stack of size C->live_nodes() * 2 to avoid frequent realloc
2035   MStack mstack(C->live_nodes() * 2);
2036   // Mark nodes as address_visited if they are inputs to an address expression
2037   VectorSet address_visited(Thread::current()->resource_area());
2038   mstack.push(n, Visit);     // Don't need to pre-visit root node
2039   while (mstack.is_nonempty()) {
2040     n = mstack.node();       // Leave node on stack
2041     Node_State nstate = mstack.state();
2042     Opcodes nop = n->Opcode();
2043     if (nstate == Pre_Visit) {
2044       if (address_visited.test(n->_idx)) { // Visited in address already?
2045         // Flag as visited and shared now.
2046         set_visited(n);
2047       }
2048       if (is_visited(n)) {   // Visited already?
2049         // Node is shared and has no reason to clone.  Flag it as shared.
2050         // This causes it to match into a register for the sharing.
2051         set_shared(n);       // Flag as shared and
2052         mstack.pop();        // remove node from stack
2053         continue;
2054       }
2055       nstate = Visit; // Not already visited; so visit now
2056     }
2057     if (nstate == Visit) {
2058       mstack.set_state(Post_Visit);
2059       set_visited(n);   // Flag as visited now
2060       bool mem_op = false;
2061 
2062       switch( nop ) {  // Handle some opcodes special
2063       case Opcodes::Op_Phi:             // Treat Phis as shared roots
2064       case Opcodes::Op_Parm:
2065       case Opcodes::Op_Proj:            // All handled specially during matching
2066       case Opcodes::Op_SafePointScalarObject:
2067         set_shared(n);
2068         set_dontcare(n);
2069         break;
2070       case Opcodes::Op_If:
2071       case Opcodes::Op_CountedLoopEnd:
2072         mstack.set_state(Alt_Post_Visit); // Alternative way
2073         // Convert (If (Bool (CmpX A B))) into (If (Bool) (CmpX A B)).  Helps
2074         // with matching cmp/branch in 1 instruction.  The Matcher needs the
2075         // Bool and CmpX side-by-side, because it can only get at constants
2076         // that are at the leaves of Match trees, and the Bool's condition acts
2077         // as a constant here.
2078         mstack.push(n->in(1), Visit);         // Clone the Bool
2079         mstack.push(n->in(0), Pre_Visit);     // Visit control input
2080         continue; // while (mstack.is_nonempty())
2081       case Opcodes::Op_ConvI2D:         // These forms efficiently match with a prior
2082       case Opcodes::Op_ConvI2F:         //   Load but not a following Store
2083         if( n->in(1)->is_Load() &&        // Prior load
2084             n->outcnt() == 1 &&           // Not already shared
2085             n->unique_out()->is_Store() ) // Following store
2086           set_shared(n);       // Force it to be a root
2087         break;
2088       case Opcodes::Op_ReverseBytesI:
2089       case Opcodes::Op_ReverseBytesL:
2090         if( n->in(1)->is_Load() &&        // Prior load
2091             n->outcnt() == 1 )            // Not already shared
2092           set_shared(n);                  // Force it to be a root
2093         break;
      case Opcodes::Op_BoxLock:         // Can't match until we get stack-regs in ADLC
2095       case Opcodes::Op_IfFalse:
2096       case Opcodes::Op_IfTrue:
2097       case Opcodes::Op_MachProj:
2098       case Opcodes::Op_MergeMem:
2099       case Opcodes::Op_Catch:
2100       case Opcodes::Op_CatchProj:
2101       case Opcodes::Op_CProj:
2102       case Opcodes::Op_JumpProj:
2103       case Opcodes::Op_JProj:
2104       case Opcodes::Op_NeverBranch:
2105         set_dontcare(n);
2106         break;
2107       case Opcodes::Op_Jump:
2108         mstack.push(n->in(1), Pre_Visit);     // Switch Value (could be shared)
2109         mstack.push(n->in(0), Pre_Visit);     // Visit Control input
2110         continue;                             // while (mstack.is_nonempty())
2111       case Opcodes::Op_StrComp:
2112       case Opcodes::Op_StrEquals:
2113       case Opcodes::Op_StrIndexOf:
2114       case Opcodes::Op_StrIndexOfChar:
2115       case Opcodes::Op_AryEq:
2116       case Opcodes::Op_HasNegatives:
2117       case Opcodes::Op_StrInflatedCopy:
2118       case Opcodes::Op_StrCompressedCopy:
2119       case Opcodes::Op_EncodeISOArray:
2120         set_shared(n); // Force result into register (it will be anyways)
2121         break;
      case Opcodes::Op_ConP: {  // Convert pointers above the centerline to NULL
2123         TypeNode *tn = n->as_Type(); // Constants derive from type nodes
2124         const TypePtr* tp = tn->type()->is_ptr();
2125         if (tp->_ptr == TypePtr::AnyNull) {
2126           tn->set_type(TypePtr::NULL_PTR);
2127         }
2128         break;
2129       }
      case Opcodes::Op_ConN: {  // Convert narrow pointers above the centerline to NULL
2131         TypeNode *tn = n->as_Type(); // Constants derive from type nodes
2132         const TypePtr* tp = tn->type()->make_ptr();
2133         if (tp && tp->_ptr == TypePtr::AnyNull) {
2134           tn->set_type(TypeNarrowOop::NULL_PTR);
2135         }
2136         break;
2137       }
2138       case Opcodes::Op_Binary:         // These are introduced in the Post_Visit state.
2139         ShouldNotReachHere();
2140         break;
2141       case Opcodes::Op_ClearArray:
2142       case Opcodes::Op_SafePoint:
2143         mem_op = true;
2144         break;
2145       default:
2146         if( n->is_Store() ) {
2147           // Do match stores, despite no ideal reg
2148           mem_op = true;
2149           break;
2150         }
2151         if( n->is_Mem() ) { // Loads and LoadStores
2152           mem_op = true;
2153           // Loads must be root of match tree due to prior load conflict
2154           if( C->subsume_loads() == false )
2155             set_shared(n);
2156         }
2157         // Fall into default case
2158         if( n->ideal_reg() != Opcodes::Op_Node )
2159           set_dontcare(n);  // Unmatchable Nodes
2160       } // end_switch
2161 
2162       for(int i = n->req() - 1; i >= 0; --i) { // For my children
2163         Node *m = n->in(i); // Get ith input
2164         if (m == NULL) continue;  // Ignore NULLs
2165         Opcodes mop = m->Opcode();
2166 
        // Must clone all producers of flags, or we will not match correctly.
        // Suppose a compare setting int-flags is shared (e.g., by a
        // switch-tree); it will then match into an ideal Op_RegFlags.  Alas,
        // the fp-flags are also there, so we may match a float-branch to
        // int-flags and expect the allocator to haul the flags from the
        // int-side to the fp-side.  No can do.
2173         if( _must_clone[static_cast<uint>(mop)] ) {
2174           mstack.push(m, Visit);
2175           continue; // for(int i = ...)
2176         }
2177 
2178         if( mop == Opcodes::Op_AddP && m->in(AddPNode::Base)->is_DecodeNarrowPtr()) {
          // Bases used in addresses must be shared, but since
          // they are shared through a DecodeN they may appear
          // to have a single use, so force sharing here.
2182           set_shared(m->in(AddPNode::Base)->in(1));
2183         }
2184 
        // If 'n' and 'm' are part of a graph for a BMI instruction, clone this node.
2186 #ifdef X86
2187         if (UseBMI1Instructions && is_bmi_pattern(n, m)) {
2188           mstack.push(m, Visit);
2189           continue;
2190         }
2191 #endif
2192 
2193         // Clone addressing expressions as they are "free" in memory access instructions
2194         if (mem_op && i == MemNode::Address && mop == Opcodes::Op_AddP &&
2195             // When there are other uses besides address expressions
2196             // put it on stack and mark as shared.
2197             !is_visited(m)) {
2198           // Some inputs for address expression are not put on stack
2199           // to avoid marking them as shared and forcing them into register
2200           // if they are used only in address expressions.
2201           // But they should be marked as shared if there are other uses
2202           // besides address expressions.
2203 
2204           if (clone_address_expressions(m->as_AddP(), mstack, address_visited)) {
2205             continue;
2206           }
2207         }   // if( mem_op &&
2208         mstack.push(m, Pre_Visit);
2209       }     // for(int i = ...)
2210     }
2211     else if (nstate == Alt_Post_Visit) {
2212       mstack.pop(); // Remove node from stack
2213       // We cannot remove the Cmp input from the Bool here, as the Bool may be
2214       // shared and all users of the Bool need to move the Cmp in parallel.
2215       // This leaves both the Bool and the If pointing at the Cmp.  To
2216       // prevent the Matcher from trying to Match the Cmp along both paths
      // BoolNode::match_edge always returns zero.
2218 
2219       // We reorder the Op_If in a pre-order manner, so we can visit without
2220       // accidentally sharing the Cmp (the Bool and the If make 2 users).
2221       n->add_req( n->in(1)->in(1) ); // Add the Cmp next to the Bool
2222     }
2223     else if (nstate == Post_Visit) {
2224       mstack.pop(); // Remove node from stack
2225 
2226       // Now hack a few special opcodes
2227       switch( n->Opcode() ) {       // Handle some opcodes special
2228       case Opcodes::Op_StorePConditional:
2229       case Opcodes::Op_StoreIConditional:
2230       case Opcodes::Op_StoreLConditional:
2231       case Opcodes::Op_CompareAndExchangeB:
2232       case Opcodes::Op_CompareAndExchangeS:
2233       case Opcodes::Op_CompareAndExchangeI:
2234       case Opcodes::Op_CompareAndExchangeL:
2235       case Opcodes::Op_CompareAndExchangeP:
2236       case Opcodes::Op_CompareAndExchangeN:
2237       case Opcodes::Op_WeakCompareAndSwapB:
2238       case Opcodes::Op_WeakCompareAndSwapS:
2239       case Opcodes::Op_WeakCompareAndSwapI:
2240       case Opcodes::Op_WeakCompareAndSwapL:
2241       case Opcodes::Op_WeakCompareAndSwapP:
2242       case Opcodes::Op_WeakCompareAndSwapN:
2243       case Opcodes::Op_CompareAndSwapB:
2244       case Opcodes::Op_CompareAndSwapS:
2245       case Opcodes::Op_CompareAndSwapI:
2246       case Opcodes::Op_CompareAndSwapL:
2247       case Opcodes::Op_CompareAndSwapP:
2248       case Opcodes::Op_CompareAndSwapN: {   // Convert trinary to binary-tree
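        // The rewiring below turns, e.g.,
        //   (CompareAndSwapI mem adr oldval newval)
        // into
        //   (CompareAndSwapI mem adr (Binary oldval newval))
        // so it fits the matcher's binary match trees.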
2249         Node *newval = n->in(MemNode::ValueIn );
2250         Node *oldval  = n->in(LoadStoreConditionalNode::ExpectedIn);
2251         Node *pair = new BinaryNode( oldval, newval );
2252         n->set_req(MemNode::ValueIn,pair);
2253         n->del_req(LoadStoreConditionalNode::ExpectedIn);
2254         break;
2255       }
2256       case Opcodes::Op_CMoveD:              // Convert trinary to binary-tree
2257       case Opcodes::Op_CMoveF:
2258       case Opcodes::Op_CMoveI:
2259       case Opcodes::Op_CMoveL:
2260       case Opcodes::Op_CMoveN:
2261       case Opcodes::Op_CMoveP:
2262       case Opcodes::Op_CMoveVD:  {
2263         // Restructure into a binary tree for Matching.  It's possible that
2264         // we could move this code up next to the graph reshaping for IfNodes
2265         // or vice-versa, but I do not want to debug this for Ladybird.
2266         // 10/2/2000 CNC.
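        // E.g., (CMoveI (Bool (CmpI a b)) ifFalse ifTrue) becomes
        //       (CMoveI (Binary Bool (CmpI a b)) (Binary ifFalse ifTrue)).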
2267         Node *pair1 = new BinaryNode(n->in(1),n->in(1)->in(1));
2268         n->set_req(1,pair1);
2269         Node *pair2 = new BinaryNode(n->in(2),n->in(3));
2270         n->set_req(2,pair2);
2271         n->del_req(3);
2272         break;
2273       }
2274       case Opcodes::Op_LoopLimit: {
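        // I.e., (LoopLimit init limit stride) becomes
        //       (LoopLimit (Binary init limit) stride).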
2275         Node *pair1 = new BinaryNode(n->in(1),n->in(2));
2276         n->set_req(1,pair1);
2277         n->set_req(2,n->in(3));
2278         n->del_req(3);
2279         break;
2280       }
2281       case Opcodes::Op_StrEquals:
2282       case Opcodes::Op_StrIndexOfChar: {
2283         Node *pair1 = new BinaryNode(n->in(2),n->in(3));
2284         n->set_req(2,pair1);
2285         n->set_req(3,n->in(4));
2286         n->del_req(4);
2287         break;
2288       }
2289       case Opcodes::Op_StrComp:
2290       case Opcodes::Op_StrIndexOf: {
2291         Node *pair1 = new BinaryNode(n->in(2),n->in(3));
2292         n->set_req(2,pair1);
2293         Node *pair2 = new BinaryNode(n->in(4),n->in(5));
2294         n->set_req(3,pair2);
2295         n->del_req(5);
2296         n->del_req(4);
2297         break;
2298       }
2299       case Opcodes::Op_StrCompressedCopy:
2300       case Opcodes::Op_StrInflatedCopy:
2301       case Opcodes::Op_EncodeISOArray: {
2302         // Restructure into a binary tree for Matching.
2303         Node* pair = new BinaryNode(n->in(3), n->in(4));
2304         n->set_req(3, pair);
2305         n->del_req(4);
2306         break;
2307       }
2308       default:
2309         break;
2310       }
2311     }
2312     else {
2313       ShouldNotReachHere();
2314     }
2315   } // end of while (mstack.is_nonempty())
2316 }
2317 
2318 #ifdef ASSERT
2319 // machine-independent root to machine-dependent root
2320 void Matcher::dump_old2new_map() {
2321   _old2new_map.dump();
2322 }
2323 #endif
2324 
2325 //---------------------------collect_null_checks-------------------------------
// Find null checks in the ideal graph and write a machine-specific node for
// each one.  Used by later implicit-null-check handling.  Actually collects
2328 // either an IfTrue or IfFalse for the common NOT-null path, AND the ideal
2329 // value being tested.
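// E.g., for (If (Bool (CmpP val NULL))) with an ne test, the IfTrue
// projection is the common not-null path; that projection and 'val' are
// pushed as a pair onto _null_check_tests below.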
2330 void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) {
2331   Node *iff = proj->in(0);
2332   if( iff->Opcode() == Opcodes::Op_If ) {
2333     // During matching If's have Bool & Cmp side-by-side
2334     BoolNode *b = iff->in(1)->as_Bool();
2335     Node *cmp = iff->in(2);
2336     Opcodes opc = cmp->Opcode();
2337     if (opc != Opcodes::Op_CmpP && opc != Opcodes::Op_CmpN) return;
2338 
2339     const Type* ct = cmp->in(2)->bottom_type();
2340     if (ct == TypePtr::NULL_PTR ||
2341         (opc == Opcodes::Op_CmpN && ct == TypeNarrowOop::NULL_PTR)) {
2342 
2343       bool push_it = false;
2344       if( proj->Opcode() == Opcodes::Op_IfTrue ) {
2345 #ifndef PRODUCT
2346         extern int all_null_checks_found;
2347         all_null_checks_found++;
2348 #endif
2349         if( b->_test._test == BoolTest::ne ) {
2350           push_it = true;
2351         }
2352       } else {
2353         assert( proj->Opcode() == Opcodes::Op_IfFalse, "" );
2354         if( b->_test._test == BoolTest::eq ) {
2355           push_it = true;
2356         }
2357       }
2358       if( push_it ) {
2359         _null_check_tests.push(proj);
2360         Node* val = cmp->in(1);
2361 #ifdef _LP64
2362         if (val->bottom_type()->isa_narrowoop() &&
2363             !Matcher::narrow_oop_use_complex_address()) {
2364           //
          // Look for a DecodeN node which should be pinned to orig_proj.
          // On platforms (e.g., Sparc) which cannot handle 2 adds
          // in an addressing mode, we have to keep the DecodeN node and
          // use it to do the implicit NULL check in the address.
2369           //
2370           // DecodeN node was pinned to non-null path (orig_proj) during
2371           // CastPP transformation in final_graph_reshaping_impl().
2372           //
2373           uint cnt = orig_proj->outcnt();
2374           for (uint i = 0; i < orig_proj->outcnt(); i++) {
2375             Node* d = orig_proj->raw_out(i);
2376             if (d->is_DecodeN() && d->in(1) == val) {
2377               val = d;
2378               val->set_req(0, NULL); // Unpin now.
              // Mark this as a special case to distinguish it from
              // the regular case: CmpP(DecodeN, NULL).
2381               val = (Node*)(((intptr_t)val) | 1);
2382               break;
2383             }
2384           }
2385         }
2386 #endif
2387         _null_check_tests.push(val);
2388       }
2389     }
2390   }
2391 }
2392 
2393 //---------------------------validate_null_checks------------------------------
// It's possible that the value being NULL checked is not the root of a match
// tree.  If so, I cannot use the value in an implicit null check.
2396 void Matcher::validate_null_checks( ) {
2397   uint cnt = _null_check_tests.size();
2398   for( uint i=0; i < cnt; i+=2 ) {
2399     Node *test = _null_check_tests[i];
2400     Node *val = _null_check_tests[i+1];
    bool is_decoden = ((intptr_t)val) & 1;
    val = (Node*)(((intptr_t)val) & ~1);
    if (has_new_node(val)) {
      Node* new_val = new_node(val);
      if (is_decoden) {
        assert(val->is_DecodeNarrowPtr() && val->in(0) == NULL, "sanity");
        // Note: new_val may have a control edge if
        // the original ideal node DecodeN was matched before
        // it was unpinned in Matcher::collect_null_checks().
        // Unpin the mach node and mark it.
        new_val->set_req(0, NULL);
        new_val = (Node*)(((intptr_t)new_val) | 1);
      }
      // Is a match-tree root, so replace with the matched value
      _null_check_tests.map(i+1, new_val);
    } else {
      // Yank from candidate list
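      // Replace this (test, value) pair with the last pair in the list,
      // shrink the list by one pair, and back up i so the moved pair is
      // examined on the next iteration.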
      _null_check_tests.map(i+1,_null_check_tests[--cnt]);
      _null_check_tests.map(i,_null_check_tests[--cnt]);
      _null_check_tests.pop();
      _null_check_tests.pop();
      i-=2;
    }
  }
}

// Used by the DFA in dfa_xxx.cpp.  Check for a following barrier or
// atomic instruction acting as a store-load barrier without any
// intervening volatile load; if one is found, no barrier is needed
// here.  We retain the Node to act as a compiler ordering barrier.
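// For example, given the shape
//   StoreP -> MemBarVolatile -> CompareAndSwapI
// the CompareAndSwapI already provides the required store-load
// ordering, so this function answers true for the MemBarVolatile and
// its machine barrier can be elided.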
bool Matcher::post_store_load_barrier(const Node* vmb) {
  Compile* C = Compile::current();
  assert(vmb->is_MemBar(), "");
  assert(vmb->Opcode() != Opcodes::Op_MemBarAcquire && vmb->Opcode() != Opcodes::Op_LoadFence, "");
  const MemBarNode* membar = vmb->as_MemBar();

  // Get the Ideal Proj node, ctrl, that can be used to iterate forward
  Node* ctrl = NULL;
  for (DUIterator_Fast imax, i = membar->fast_outs(imax); i < imax; i++) {
    Node* p = membar->fast_out(i);
    assert(p->is_Proj(), "only projections here");
    if ((p->as_Proj()->_con == TypeFunc::Control) &&
        !C->node_arena()->contains(p)) { // Unmatched old-space only
      ctrl = p;
      break;
    }
  }
  assert((ctrl != NULL), "missing control projection");

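  // Walk the users of the control projection, looking either for a node
  // that subsumes this barrier (another store-load barrier or an atomic
  // operation) or for an upcoming volatile load, whose acquire membar
  // forces this barrier to stay.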
  for (DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++) {
    Node *x = ctrl->fast_out(j);
    Opcodes xop = x->Opcode();

    // We don't need the current barrier if we see another barrier or a
    // lock operation before seeing a volatile load.
    //
    // Op_FastUnlock previously appeared in the Op_* list below.
    // With the advent of 1-0 lock operations we're no longer guaranteed
    // that a monitor exit operation contains a serializing instruction.

    if (xop == Opcodes::Op_MemBarVolatile ||
        xop == Opcodes::Op_CompareAndExchangeB ||
        xop == Opcodes::Op_CompareAndExchangeS ||
        xop == Opcodes::Op_CompareAndExchangeI ||
        xop == Opcodes::Op_CompareAndExchangeL ||
        xop == Opcodes::Op_CompareAndExchangeP ||
        xop == Opcodes::Op_CompareAndExchangeN ||
        xop == Opcodes::Op_WeakCompareAndSwapB ||
        xop == Opcodes::Op_WeakCompareAndSwapS ||
        xop == Opcodes::Op_WeakCompareAndSwapL ||
        xop == Opcodes::Op_WeakCompareAndSwapP ||
        xop == Opcodes::Op_WeakCompareAndSwapN ||
        xop == Opcodes::Op_WeakCompareAndSwapI ||
        xop == Opcodes::Op_CompareAndSwapB ||
        xop == Opcodes::Op_CompareAndSwapS ||
        xop == Opcodes::Op_CompareAndSwapL ||
        xop == Opcodes::Op_CompareAndSwapP ||
        xop == Opcodes::Op_CompareAndSwapN ||
        xop == Opcodes::Op_CompareAndSwapI) {
      return true;
    }

    // Op_FastLock previously appeared in the Op_* list above.
    // With biased locking we're no longer guaranteed that a monitor
    // enter operation contains a serializing instruction.
    if ((xop == Opcodes::Op_FastLock) && !UseBiasedLocking) {
      return true;
    }

    if (x->is_MemBar()) {
      // We must retain this membar if there is an upcoming volatile
      // load, which will be followed by an acquire membar.
      if (xop == Opcodes::Op_MemBarAcquire || xop == Opcodes::Op_LoadFence) {
        return false;
      } else {
        // For other kinds of barriers, check by pretending we
        // are them, and seeing if we can be removed.
        return post_store_load_barrier(x->as_MemBar());
      }
    }

    // Probably not necessary to check for these.
    if (x->is_Call() || x->is_SafePoint() || x->is_block_proj()) {
      return false;
    }
  }
  return false;
}

// Check whether node n is a branch to an uncommon trap that we could
// optimize as a test with very high branch costs in case of going to
// the uncommon trap.  The code must be able to be recompiled to use
// a cheaper test.
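// The expected subgraph shape is
//   If -> IfFalse -> (Region ->)* Call(uncommon trap blob)
// with at most a few Region nodes between the false projection and the
// trap call.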
bool Matcher::branches_to_uncommon_trap(const Node *n) {
  // Don't do it for natives, adapters, or runtime stubs
  Compile *C = Compile::current();
  if (!C->is_method_compilation()) return false;

  assert(n->is_If(), "You should only call this on if nodes.");
  IfNode *ifn = n->as_If();

  Node *ifFalse = NULL;
  for (DUIterator_Fast imax, i = ifn->fast_outs(imax); i < imax; i++) {
    if (ifn->fast_out(i)->is_IfFalse()) {
      ifFalse = ifn->fast_out(i);
      break;
    }
  }
  assert(ifFalse, "An If should have an ifFalse. Graph is broken.");

  Node *reg = ifFalse;
  int cnt = 4; // We must protect against cycles.  Limit to 4 iterations.
               // Alternatively use a visited set?  Seems too expensive.
  while (reg != NULL && cnt > 0) {
    CallNode *call = NULL;
    RegionNode *nxt_reg = NULL;
    for (DUIterator_Fast imax, i = reg->fast_outs(imax); i < imax; i++) {
      Node *o = reg->fast_out(i);
      if (o->is_Call()) {
        call = o->as_Call();
      }
      if (o->is_Region()) {
        nxt_reg = o->as_Region();
      }
    }

    if (call &&
        call->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
      const Type* trtype = call->in(TypeFunc::Parms)->bottom_type();
      if (trtype->isa_int() && trtype->is_int()->is_con()) {
        jint tr_con = trtype->is_int()->get_con();
        Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(tr_con);
        Deoptimization::DeoptAction action = Deoptimization::trap_request_action(tr_con);
        assert((int)reason < (int)BitsPerInt, "recode bit map");

        if (is_set_nth_bit(C->allowed_deopt_reasons(), (int)reason)
            && action != Deoptimization::Action_none) {
          // This uncommon trap is sure to recompile, eventually.
          // When that happens, C->too_many_traps will prevent
          // this transformation from happening again.
          return true;
        }
      }
    }

    reg = nxt_reg;
    cnt--;
  }

  return false;
}

//=============================================================================
//---------------------------State---------------------------------------------
State::State(void) {
#ifdef ASSERT
  _id = 0;
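  // Poison the kid and leaf pointers with recognizable bit patterns so
  // that use of an uninitialized State is easy to spot in a debugger.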
  _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
  _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
  //memset(_cost, -1, sizeof(_cost));
  //memset(_rule, -1, sizeof(_rule));
#endif
  memset(_valid, 0, sizeof(_valid));
}

#ifdef ASSERT
State::~State() {
  _id = 99;
  _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
  _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
  memset(_cost, -3, sizeof(_cost));
  memset(_rule, -3, sizeof(_rule));
}
#endif

#ifndef PRODUCT
//---------------------------dump----------------------------------------------
void State::dump() {
  tty->print("\n");
  dump(0);
}

void State::dump(int depth) {
  for( int j = 0; j < depth; j++ )
    tty->print("   ");
  tty->print("--N: ");
  _leaf->dump();
  uint i;
  for( i = 0; i < _LAST_MACH_OPER; i++ )
    // Check for valid entry
    if( valid(i) ) {
      for( int j = 0; j < depth; j++ )
        tty->print("   ");
      assert(_cost[i] != max_juint, "cost must be a valid value");
      assert(_rule[i] < _last_Mach_Node, "rule[i] must be valid rule");
      tty->print_cr("%s  %d  %s",
                    ruleName[i], _cost[i], ruleName[_rule[i]] );
    }
  tty->cr();

  for( i=0; i<2; i++ )
    if( _kids[i] )
      _kids[i]->dump(depth+1);
}
#endif