1 /*
   2  * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "compiler/compileLog.hpp"
  27 #include "ci/bcEscapeAnalyzer.hpp"
  28 #include "compiler/oopMap.hpp"
  29 #include "opto/callGenerator.hpp"
  30 #include "opto/callnode.hpp"
  31 #include "opto/castnode.hpp"
  32 #include "opto/convertnode.hpp"
  33 #include "opto/escape.hpp"
  34 #include "opto/locknode.hpp"
  35 #include "opto/machnode.hpp"
  36 #include "opto/matcher.hpp"
  37 #include "opto/parse.hpp"
  38 #include "opto/regalloc.hpp"
  39 #include "opto/regmask.hpp"
  40 #include "opto/rootnode.hpp"
  41 #include "opto/runtime.hpp"
  42 
  43 // Portions of code courtesy of Clifford Click
  44 
  45 // Optimization - Graph Style
  46 
  47 //=============================================================================
  48 uint StartNode::size_of() const { return sizeof(*this); }
  49 uint StartNode::cmp( const Node &n ) const
  50 { return _domain == ((StartNode&)n)._domain; }
  51 const Type *StartNode::bottom_type() const { return _domain; }
  52 const Type *StartNode::Value(PhaseTransform *phase) const { return _domain; }
  53 #ifndef PRODUCT
  54 void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
  55 #endif
  56 
  57 //------------------------------Ideal------------------------------------------
  58 Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
  59   return remove_dead_region(phase, can_reshape) ? this : NULL;
  60 }
  61 
  62 //------------------------------calling_convention-----------------------------
  63 void StartNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  64   Matcher::calling_convention( sig_bt, parm_regs, argcnt, false );
  65 }
  66 
  67 //------------------------------Registers--------------------------------------
  68 const RegMask &StartNode::in_RegMask(uint) const {
  69   return RegMask::Empty;
  70 }
  71 
  72 //------------------------------match------------------------------------------
  73 // Construct projections for incoming parameters, and their RegMask info
  74 Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
  75   switch (proj->_con) {
  76   case TypeFunc::Control:
  77   case TypeFunc::I_O:
  78   case TypeFunc::Memory:
  79     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  80   case TypeFunc::FramePtr:
  81     return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  82   case TypeFunc::ReturnAdr:
  83     return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  84   case TypeFunc::Parms:
  85   default: {
  86       uint parm_num = proj->_con - TypeFunc::Parms;
  87       const Type *t = _domain->field_at(proj->_con);
  88       if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
  89         return new ConNode(Type::TOP);
  90       uint ideal_reg = t->ideal_reg();
  91       RegMask &rm = match->_calling_convention_mask[parm_num];
  92       return new MachProjNode(this,proj->_con,rm,ideal_reg);
  93     }
  94   }
  95   return NULL;
  96 }
  97 
  98 //------------------------------StartOSRNode----------------------------------
  99 // The method start node for an on stack replacement adapter
 100 
 101 //------------------------------osr_domain-----------------------------
 102 const TypeTuple *StartOSRNode::osr_domain() {
 103   const Type **fields = TypeTuple::fields(2);
 104   fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;  // address of osr buffer
 105 
 106   return TypeTuple::make(TypeFunc::Parms+1, fields);
 107 }
 108 
 109 //=============================================================================
 110 const char * const ParmNode::names[TypeFunc::Parms+1] = {
 111   "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
 112 };
 113 
 114 #ifndef PRODUCT
 115 void ParmNode::dump_spec(outputStream *st) const {
 116   if( _con < TypeFunc::Parms ) {
 117     st->print("%s", names[_con]);
 118   } else {
 119     st->print("Parm%d: ",_con-TypeFunc::Parms);
 120     // Verbose and WizardMode dump bottom_type for all nodes
 121     if( !Verbose && !WizardMode )   bottom_type()->dump_on(st);
 122   }
 123 }
 124 #endif
 125 
 126 uint ParmNode::ideal_reg() const {
 127   switch( _con ) {
 128   case TypeFunc::Control  : // fall through
 129   case TypeFunc::I_O      : // fall through
 130   case TypeFunc::Memory   : return 0;
 131   case TypeFunc::FramePtr : // fall through
 132   case TypeFunc::ReturnAdr: return Op_RegP;
 133   default                 : assert( _con > TypeFunc::Parms, "" );
 134     // fall through
 135   case TypeFunc::Parms    : {
 136     // Type of argument being passed
 137     const Type *t = in(0)->as_Start()->_domain->field_at(_con);
 138     return t->ideal_reg();
 139   }
 140   }
 141   ShouldNotReachHere();
 142   return 0;
 143 }
 144 
 145 //=============================================================================
 146 ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) {
 147   init_req(TypeFunc::Control,cntrl);
 148   init_req(TypeFunc::I_O,i_o);
 149   init_req(TypeFunc::Memory,memory);
 150   init_req(TypeFunc::FramePtr,frameptr);
 151   init_req(TypeFunc::ReturnAdr,retadr);
 152 }
 153 
 154 Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){
 155   return remove_dead_region(phase, can_reshape) ? this : NULL;
 156 }
 157 
 158 const Type *ReturnNode::Value( PhaseTransform *phase ) const {
 159   return ( phase->type(in(TypeFunc::Control)) == Type::TOP)
 160     ? Type::TOP
 161     : Type::BOTTOM;
 162 }
 163 
 164 // Do we Match on this edge index or not?  No edges on return nodes
 165 uint ReturnNode::match_edge(uint idx) const {
 166   return 0;
 167 }
 168 
 169 
 170 #ifndef PRODUCT
 171 void ReturnNode::dump_req(outputStream *st) const {
 172   // Dump the required inputs, enclosed in '(' and ')'
 173   uint i;                       // Exit value of loop
 174   for (i = 0; i < req(); i++) {    // For all required inputs
 175     if (i == TypeFunc::Parms) st->print("returns");
 176     if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
 177     else st->print("_ ");
 178   }
 179 }
 180 #endif
 181 
 182 //=============================================================================
 183 RethrowNode::RethrowNode(
 184   Node* cntrl,
 185   Node* i_o,
 186   Node* memory,
 187   Node* frameptr,
 188   Node* ret_adr,
 189   Node* exception
 190 ) : Node(TypeFunc::Parms + 1) {
 191   init_req(TypeFunc::Control  , cntrl    );
 192   init_req(TypeFunc::I_O      , i_o      );
 193   init_req(TypeFunc::Memory   , memory   );
 194   init_req(TypeFunc::FramePtr , frameptr );
 195   init_req(TypeFunc::ReturnAdr, ret_adr);
 196   init_req(TypeFunc::Parms    , exception);
 197 }
 198 
 199 Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){
 200   return remove_dead_region(phase, can_reshape) ? this : NULL;
 201 }
 202 
 203 const Type *RethrowNode::Value( PhaseTransform *phase ) const {
 204   return (phase->type(in(TypeFunc::Control)) == Type::TOP)
 205     ? Type::TOP
 206     : Type::BOTTOM;
 207 }
 208 
 209 uint RethrowNode::match_edge(uint idx) const {
 210   return 0;
 211 }
 212 
 213 #ifndef PRODUCT
 214 void RethrowNode::dump_req(outputStream *st) const {
 215   // Dump the required inputs, enclosed in '(' and ')'
 216   uint i;                       // Exit value of loop
 217   for (i = 0; i < req(); i++) {    // For all required inputs
 218     if (i == TypeFunc::Parms) st->print("exception");
 219     if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
 220     else st->print("_ ");
 221   }
 222 }
 223 #endif
 224 
 225 //=============================================================================
 226 // Do we Match on this edge index or not?  Match only target address & method
 227 uint TailCallNode::match_edge(uint idx) const {
 228   return TypeFunc::Parms <= idx  &&  idx <= TypeFunc::Parms+1;
 229 }
 230 
 231 //=============================================================================
 232 // Do we Match on this edge index or not?  Match only target address & oop
 233 uint TailJumpNode::match_edge(uint idx) const {
 234   return TypeFunc::Parms <= idx  &&  idx <= TypeFunc::Parms+1;
 235 }
 236 
 237 //=============================================================================
 238 JVMState::JVMState(ciMethod* method, JVMState* caller) :
 239   _method(method) {
 240   assert(method != NULL, "must be valid call site");
 241   _reexecute = Reexecute_Undefined;
 242   debug_only(_bci = -99);  // random garbage value
 243   debug_only(_map = (SafePointNode*)-1);
 244   _caller = caller;
 245   _depth  = 1 + (caller == NULL ? 0 : caller->depth());
 246   _locoff = TypeFunc::Parms;
 247   _stkoff = _locoff + _method->max_locals();
 248   _monoff = _stkoff + _method->max_stack();
 249   _scloff = _monoff;
 250   _endoff = _monoff;
 251   _sp = 0;
 252 }
 253 JVMState::JVMState(int stack_size) :
 254   _method(NULL) {
 255   _bci = InvocationEntryBci;
 256   _reexecute = Reexecute_Undefined;
 257   debug_only(_map = (SafePointNode*)-1);
 258   _caller = NULL;
 259   _depth  = 1;
 260   _locoff = TypeFunc::Parms;
 261   _stkoff = _locoff;
 262   _monoff = _stkoff + stack_size;
 263   _scloff = _monoff;
 264   _endoff = _monoff;
 265   _sp = 0;
 266 }
 267 
 268 //--------------------------------of_depth-------------------------------------
 269 JVMState* JVMState::of_depth(int d) const {
 270   const JVMState* jvmp = this;
 271   assert(0 < d && (uint)d <= depth(), "oob");
 272   for (int skip = depth() - d; skip > 0; skip--) {
 273     jvmp = jvmp->caller();
 274   }
 275   assert(jvmp->depth() == (uint)d, "found the right one");
 276   return (JVMState*)jvmp;
 277 }
 278 
 279 //-----------------------------same_calls_as-----------------------------------
 280 bool JVMState::same_calls_as(const JVMState* that) const {
 281   if (this == that)                    return true;
 282   if (this->depth() != that->depth())  return false;
 283   const JVMState* p = this;
 284   const JVMState* q = that;
 285   for (;;) {
 286     if (p->_method != q->_method)    return false;
 287     if (p->_method == NULL)          return true;   // bci is irrelevant
 288     if (p->_bci    != q->_bci)       return false;
 289     if (p->_reexecute != q->_reexecute)  return false;
 290     p = p->caller();
 291     q = q->caller();
 292     if (p == q)                      return true;
 293     assert(p != NULL && q != NULL, "depth check ensures we don't run off end");
 294   }
 295 }
 296 
 297 //------------------------------debug_start------------------------------------
 298 uint JVMState::debug_start()  const {
 299   debug_only(JVMState* jvmroot = of_depth(1));
 300   assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last");
 301   return of_depth(1)->locoff();
 302 }
 303 
 304 //-------------------------------debug_end-------------------------------------
 305 uint JVMState::debug_end() const {
 306   debug_only(JVMState* jvmroot = of_depth(1));
 307   assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last");
 308   return endoff();
 309 }
 310 
 311 //------------------------------debug_depth------------------------------------
 312 uint JVMState::debug_depth() const {
 313   uint total = 0;
 314   for (const JVMState* jvmp = this; jvmp != NULL; jvmp = jvmp->caller()) {
 315     total += jvmp->debug_size();
 316   }
 317   return total;
 318 }
 319 
 320 #ifndef PRODUCT
 321 
 322 //------------------------------format_helper----------------------------------
 323 // Given an allocation (a Chaitin object) and a Node decide if the Node carries
 324 // any defined value or not.  If it does, print out the register or constant.
 325 static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i, GrowableArray<SafePointScalarObjectNode*> *scobjs ) {
 326   if (n == NULL) { st->print(" NULL"); return; }
 327   if (n->is_SafePointScalarObject()) {
 328     // Scalar replacement.
 329     SafePointScalarObjectNode* spobj = n->as_SafePointScalarObject();
 330     scobjs->append_if_missing(spobj);
 331     int sco_n = scobjs->find(spobj);
 332     assert(sco_n >= 0, "");
 333     st->print(" %s%d]=#ScObj" INT32_FORMAT, msg, i, sco_n);
 334     return;
 335   }
 336   if (regalloc->node_regs_max_index() > 0 &&
 337       OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined
 338     char buf[50];
 339     regalloc->dump_register(n,buf);
 340     st->print(" %s%d]=%s",msg,i,buf);
 341   } else {                      // No register, but might be constant
 342     const Type *t = n->bottom_type();
 343     switch (t->base()) {
 344     case Type::Int:
 345       st->print(" %s%d]=#"INT32_FORMAT,msg,i,t->is_int()->get_con());
 346       break;
 347     case Type::AnyPtr:
 348       assert( t == TypePtr::NULL_PTR || n->in_dump(), "" );
 349       st->print(" %s%d]=#NULL",msg,i);
 350       break;
 351     case Type::AryPtr:
 352     case Type::InstPtr:
 353       st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->isa_oopptr()->const_oop()));
 354       break;
 355     case Type::KlassPtr:
 356       st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_klassptr()->klass()));
 357       break;
 358     case Type::MetadataPtr:
 359       st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_metadataptr()->metadata()));
 360       break;
 361     case Type::NarrowOop:
 362       st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_oopptr()->const_oop()));
 363       break;
 364     case Type::RawPtr:
 365       st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,p2i(t->is_rawptr()));
 366       break;
 367     case Type::DoubleCon:
 368       st->print(" %s%d]=#%fD",msg,i,t->is_double_constant()->_d);
 369       break;
 370     case Type::FloatCon:
 371       st->print(" %s%d]=#%fF",msg,i,t->is_float_constant()->_f);
 372       break;
 373     case Type::Long:
 374       st->print(" %s%d]=#"INT64_FORMAT,msg,i,(int64_t)(t->is_long()->get_con()));
 375       break;
 376     case Type::Half:
 377     case Type::Top:
 378       st->print(" %s%d]=_",msg,i);
 379       break;
 380     default: ShouldNotReachHere();
 381     }
 382   }
 383 }
 384 
 385 //------------------------------format-----------------------------------------
 386 void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const {
 387   st->print("        #");
 388   if (_method) {
 389     _method->print_short_name(st);
 390     st->print(" @ bci:%d ",_bci);
 391   } else {
 392     st->print_cr(" runtime stub ");
 393     return;
 394   }
 395   if (n->is_MachSafePoint()) {
 396     GrowableArray<SafePointScalarObjectNode*> scobjs;
 397     MachSafePointNode *mcall = n->as_MachSafePoint();
 398     uint i;
 399     // Print locals
 400     for (i = 0; i < (uint)loc_size(); i++)
 401       format_helper(regalloc, st, mcall->local(this, i), "L[", i, &scobjs);
 402     // Print stack
 403     for (i = 0; i < (uint)stk_size(); i++) {
 404       if ((uint)(_stkoff + i) >= mcall->len())
 405         st->print(" oob ");
 406       else
 407        format_helper(regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs);
 408     }
 409     for (i = 0; (int)i < nof_monitors(); i++) {
 410       Node *box = mcall->monitor_box(this, i);
 411       Node *obj = mcall->monitor_obj(this, i);
 412       if (regalloc->node_regs_max_index() > 0 &&
 413           OptoReg::is_valid(regalloc->get_reg_first(box))) {
 414         box = BoxLockNode::box_node(box);
 415         format_helper(regalloc, st, box, "MON-BOX[", i, &scobjs);
 416       } else {
 417         OptoReg::Name box_reg = BoxLockNode::reg(box);
 418         st->print(" MON-BOX%d=%s+%d",
 419                    i,
 420                    OptoReg::regname(OptoReg::c_frame_pointer),
 421                    regalloc->reg2offset(box_reg));
 422       }
 423       const char* obj_msg = "MON-OBJ[";
 424       if (EliminateLocks) {
 425         if (BoxLockNode::box_node(box)->is_eliminated())
 426           obj_msg = "MON-OBJ(LOCK ELIMINATED)[";
 427       }
 428       format_helper(regalloc, st, obj, obj_msg, i, &scobjs);
 429     }
 430 
 431     for (i = 0; i < (uint)scobjs.length(); i++) {
 432       // Scalar replaced objects.
 433       st->cr();
 434       st->print("        # ScObj" INT32_FORMAT " ", i);
 435       SafePointScalarObjectNode* spobj = scobjs.at(i);
 436       ciKlass* cik = spobj->bottom_type()->is_oopptr()->klass();
 437       assert(cik->is_instance_klass() ||
 438              cik->is_array_klass(), "Not supported allocation.");
 439       ciInstanceKlass *iklass = NULL;
 440       if (cik->is_instance_klass()) {
 441         cik->print_name_on(st);
 442         iklass = cik->as_instance_klass();
 443       } else if (cik->is_type_array_klass()) {
 444         cik->as_array_klass()->base_element_type()->print_name_on(st);
 445         st->print("[%d]", spobj->n_fields());
 446       } else if (cik->is_obj_array_klass()) {
 447         ciKlass* cie = cik->as_obj_array_klass()->base_element_klass();
 448         if (cie->is_instance_klass()) {
 449           cie->print_name_on(st);
 450         } else if (cie->is_type_array_klass()) {
 451           cie->as_array_klass()->base_element_type()->print_name_on(st);
 452         } else {
 453           ShouldNotReachHere();
 454         }
 455         st->print("[%d]", spobj->n_fields());
 456         int ndim = cik->as_array_klass()->dimension() - 1;
 457         while (ndim-- > 0) {
 458           st->print("[]");
 459         }
 460       }
 461       st->print("={");
 462       uint nf = spobj->n_fields();
 463       if (nf > 0) {
 464         uint first_ind = spobj->first_index(mcall->jvms());
 465         Node* fld_node = mcall->in(first_ind);
 466         ciField* cifield;
 467         if (iklass != NULL) {
 468           st->print(" [");
 469           cifield = iklass->nonstatic_field_at(0);
 470           cifield->print_name_on(st);
 471           format_helper(regalloc, st, fld_node, ":", 0, &scobjs);
 472         } else {
 473           format_helper(regalloc, st, fld_node, "[", 0, &scobjs);
 474         }
 475         for (uint j = 1; j < nf; j++) {
 476           fld_node = mcall->in(first_ind+j);
 477           if (iklass != NULL) {
 478             st->print(", [");
 479             cifield = iklass->nonstatic_field_at(j);
 480             cifield->print_name_on(st);
 481             format_helper(regalloc, st, fld_node, ":", j, &scobjs);
 482           } else {
 483             format_helper(regalloc, st, fld_node, ", [", j, &scobjs);
 484           }
 485         }
 486       }
 487       st->print(" }");
 488     }
 489   }
 490   st->cr();
 491   if (caller() != NULL) caller()->format(regalloc, n, st);
 492 }
 493 
 494 
 495 void JVMState::dump_spec(outputStream *st) const {
 496   if (_method != NULL) {
 497     bool printed = false;
 498     if (!Verbose) {
 499       // The JVMS dumps make really, really long lines.
 500       // Take out the most boring parts, which are the package prefixes.
 501       char buf[500];
 502       stringStream namest(buf, sizeof(buf));
 503       _method->print_short_name(&namest);
 504       if (namest.count() < sizeof(buf)) {
 505         const char* name = namest.base();
 506         if (name[0] == ' ')  ++name;
 507         const char* endcn = strchr(name, ':');  // end of class name
 508         if (endcn == NULL)  endcn = strchr(name, '(');
 509         if (endcn == NULL)  endcn = name + strlen(name);
 510         while (endcn > name && endcn[-1] != '.' && endcn[-1] != '/')
 511           --endcn;
 512         st->print(" %s", endcn);
 513         printed = true;
 514       }
 515     }
 516     if (!printed)
 517       _method->print_short_name(st);
 518     st->print(" @ bci:%d",_bci);
 519     if(_reexecute == Reexecute_True)
 520       st->print(" reexecute");
 521   } else {
 522     st->print(" runtime stub");
 523   }
 524   if (caller() != NULL)  caller()->dump_spec(st);
 525 }
 526 
 527 
 528 void JVMState::dump_on(outputStream* st) const {
 529   bool print_map = _map && !((uintptr_t)_map & 1) &&
 530                   ((caller() == NULL) || (caller()->map() != _map));
 531   if (print_map) {
 532     if (_map->len() > _map->req()) {  // _map->has_exceptions()
 533       Node* ex = _map->in(_map->req());  // _map->next_exception()
 534       // skip the first one; it's already being printed
 535       while (ex != NULL && ex->len() > ex->req()) {
 536         ex = ex->in(ex->req());  // ex->next_exception()
 537         ex->dump(1);
 538       }
 539     }
 540     _map->dump(Verbose ? 2 : 1);
 541   }
 542   if (caller() != NULL) {
 543     caller()->dump_on(st);
 544   }
 545   st->print("JVMS depth=%d loc=%d stk=%d arg=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=",
 546              depth(), locoff(), stkoff(), argoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false");
 547   if (_method == NULL) {
 548     st->print_cr("(none)");
 549   } else {
 550     _method->print_name(st);
 551     st->cr();
 552     if (bci() >= 0 && bci() < _method->code_size()) {
 553       st->print("    bc: ");
 554       _method->print_codes_on(bci(), bci()+1, st);
 555     }
 556   }
 557 }
 558 
 559 // Extra way to dump a jvms from the debugger,
 560 // to avoid a bug with C++ member function calls.
 561 void dump_jvms(JVMState* jvms) {
 562   jvms->dump();
 563 }
 564 #endif
 565 
 566 //--------------------------clone_shallow--------------------------------------
 567 JVMState* JVMState::clone_shallow(Compile* C) const {
 568   JVMState* n = has_method() ? new (C) JVMState(_method, _caller) : new (C) JVMState(0);
 569   n->set_bci(_bci);
 570   n->_reexecute = _reexecute;
 571   n->set_locoff(_locoff);
 572   n->set_stkoff(_stkoff);
 573   n->set_monoff(_monoff);
 574   n->set_scloff(_scloff);
 575   n->set_endoff(_endoff);
 576   n->set_sp(_sp);
 577   n->set_map(_map);
 578   return n;
 579 }
 580 
 581 //---------------------------clone_deep----------------------------------------
 582 JVMState* JVMState::clone_deep(Compile* C) const {
 583   JVMState* n = clone_shallow(C);
 584   for (JVMState* p = n; p->_caller != NULL; p = p->_caller) {
 585     p->_caller = p->_caller->clone_shallow(C);
 586   }
 587   assert(n->depth() == depth(), "sanity");
 588   assert(n->debug_depth() == debug_depth(), "sanity");
 589   return n;
 590 }
 591 
 592 /**
 593  * Reset map for all callers
 594  */
 595 void JVMState::set_map_deep(SafePointNode* map) {
 596   for (JVMState* p = this; p->_caller != NULL; p = p->_caller) {
 597     p->set_map(map);
 598   }
 599 }
 600 
 601 // Adapt offsets in in-array after adding or removing an edge.
 602 // Prerequisite is that the JVMState is used by only one node.
 603 void JVMState::adapt_position(int delta) {
 604   for (JVMState* jvms = this; jvms != NULL; jvms = jvms->caller()) {
 605     jvms->set_locoff(jvms->locoff() + delta);
 606     jvms->set_stkoff(jvms->stkoff() + delta);
 607     jvms->set_monoff(jvms->monoff() + delta);
 608     jvms->set_scloff(jvms->scloff() + delta);
 609     jvms->set_endoff(jvms->endoff() + delta);
 610   }
 611 }
 612 
 613 // Mirror the stack size calculation in the deopt code
 614 // How much stack space would we need at this point in the program in
 615 // case of deoptimization?
 616 int JVMState::interpreter_frame_size() const {
 617   const JVMState* jvms = this;
 618   int size = 0;
 619   int callee_parameters = 0;
 620   int callee_locals = 0;
 621   int extra_args = method()->max_stack() - stk_size();
 622 
 623   while (jvms != NULL) {
 624     int locks = jvms->nof_monitors();
 625     int temps = jvms->stk_size();
 626     bool is_top_frame = (jvms == this);
 627     ciMethod* method = jvms->method();
 628 
 629     int frame_size = BytesPerWord * Interpreter::size_activation(method->max_stack(),
 630                                                                  temps + callee_parameters,
 631                                                                  extra_args,
 632                                                                  locks,
 633                                                                  callee_parameters,
 634                                                                  callee_locals,
 635                                                                  is_top_frame);
 636     size += frame_size;
 637 
 638     callee_parameters = method->size_of_parameters();
 639     callee_locals = method->max_locals();
 640     extra_args = 0;
 641     jvms = jvms->caller();
 642   }
 643   return size + Deoptimization::last_frame_adjust(0, callee_locals) * BytesPerWord;
 644 }
 645 
 646 //=============================================================================
 647 uint CallNode::cmp( const Node &n ) const
 648 { return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
 649 #ifndef PRODUCT
 650 void CallNode::dump_req(outputStream *st) const {
 651   // Dump the required inputs, enclosed in '(' and ')'
 652   uint i;                       // Exit value of loop
 653   for (i = 0; i < req(); i++) {    // For all required inputs
 654     if (i == TypeFunc::Parms) st->print("(");
 655     if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
 656     else st->print("_ ");
 657   }
 658   st->print(")");
 659 }
 660 
 661 void CallNode::dump_spec(outputStream *st) const {
 662   st->print(" ");
 663   if (tf() != NULL)  tf()->dump_on(st);
 664   if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
 665   if (jvms() != NULL)  jvms()->dump_spec(st);
 666 }
 667 #endif
 668 
 669 const Type *CallNode::bottom_type() const { return tf()->range(); }
 670 const Type *CallNode::Value(PhaseTransform *phase) const {
 671   if (phase->type(in(0)) == Type::TOP)  return Type::TOP;
 672   return tf()->range();
 673 }
 674 
 675 //------------------------------calling_convention-----------------------------
 676 void CallNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
 677   // Use the standard compiler calling convention
 678   Matcher::calling_convention( sig_bt, parm_regs, argcnt, true );
 679 }
 680 
 681 
 682 //------------------------------match------------------------------------------
 683 // Construct projections for control, I/O, memory-fields, ..., and
 684 // return result(s) along with their RegMask info
 685 Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
 686   switch (proj->_con) {
 687   case TypeFunc::Control:
 688   case TypeFunc::I_O:
 689   case TypeFunc::Memory:
 690     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
 691 
 692   case TypeFunc::Parms+1:       // For LONG & DOUBLE returns
 693     assert(tf()->range()->field_at(TypeFunc::Parms+1) == Type::HALF, "");
 694     // 2nd half of doubles and longs
 695     return new MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);
 696 
 697   case TypeFunc::Parms: {       // Normal returns
 698     uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg();
 699     OptoRegPair regs = is_CallRuntime()
 700       ? match->c_return_value(ideal_reg,true)  // Calls into C runtime
 701       : match->  return_value(ideal_reg,true); // Calls into compiled Java code
 702     RegMask rm = RegMask(regs.first());
 703     if( OptoReg::is_valid(regs.second()) )
 704       rm.Insert( regs.second() );
 705     return new MachProjNode(this,proj->_con,rm,ideal_reg);
 706   }
 707 
 708   case TypeFunc::ReturnAdr:
 709   case TypeFunc::FramePtr:
 710   default:
 711     ShouldNotReachHere();
 712   }
 713   return NULL;
 714 }
 715 
 716 // Do we Match on this edge index or not?  Match no edges
 717 uint CallNode::match_edge(uint idx) const {
 718   return 0;
 719 }
 720 
 721 //
 722 // Determine whether the call could modify the field of the specified
 723 // instance at the specified offset.
 724 //
 725 bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
 726   assert((t_oop != NULL), "sanity");
 727   if (t_oop->is_known_instance()) {
 728     // The instance_id is set only for scalar-replaceable allocations which
 729     // are not passed as arguments according to Escape Analysis.
 730     return false;
 731   }
 732   if (t_oop->is_ptr_to_boxed_value()) {
 733     ciKlass* boxing_klass = t_oop->klass();
 734     if (is_CallStaticJava() && as_CallStaticJava()->is_boxing_method()) {
 735       // Skip unrelated boxing methods.
 736       Node* proj = proj_out(TypeFunc::Parms);
 737       if ((proj == NULL) || (phase->type(proj)->is_instptr()->klass() != boxing_klass)) {
 738         return false;
 739       }
 740     }
 741     if (is_CallJava() && as_CallJava()->method() != NULL) {
 742       ciMethod* meth = as_CallJava()->method();
 743       if (meth->is_accessor()) {
 744         return false;
 745       }
 746       // May modify (by reflection) if an boxing object is passed
 747       // as argument or returned.
 748       if (returns_pointer() && (proj_out(TypeFunc::Parms) != NULL)) {
 749         Node* proj = proj_out(TypeFunc::Parms);
 750         const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
 751         if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
 752                                  (inst_t->klass() == boxing_klass))) {
 753           return true;
 754         }
 755       }
 756       const TypeTuple* d = tf()->domain();
 757       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
 758         const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
 759         if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
 760                                  (inst_t->klass() == boxing_klass))) {
 761           return true;
 762         }
 763       }
 764       return false;
 765     }
 766   }
 767   return true;
 768 }
 769 
 770 // Does this call have a direct reference to n other than debug information?
 771 bool CallNode::has_non_debug_use(Node *n) {
 772   const TypeTuple * d = tf()->domain();
 773   for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
 774     Node *arg = in(i);
 775     if (arg == n) {
 776       return true;
 777     }
 778   }
 779   return false;
 780 }
 781 
 782 // Returns the unique CheckCastPP of a call
 783 // or 'this' if there are several CheckCastPP or unexpected uses
 784 // or returns NULL if there is no one.
 785 Node *CallNode::result_cast() {
 786   Node *cast = NULL;
 787 
 788   Node *p = proj_out(TypeFunc::Parms);
 789   if (p == NULL)
 790     return NULL;
 791 
 792   for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
 793     Node *use = p->fast_out(i);
 794     if (use->is_CheckCastPP()) {
 795       if (cast != NULL) {
 796         return this;  // more than 1 CheckCastPP
 797       }
 798       cast = use;
 799     } else if (!use->is_Initialize() &&
 800                !use->is_AddP() &&
 801                use->Opcode() != Op_MemBarStoreStore) {
 802       // Expected uses are restricted to a CheckCastPP, an Initialize
 803       // node, a MemBarStoreStore (clone) and AddP nodes. If we
 804       // encounter any other use (a Phi node can be seen in rare
 805       // cases) return this to prevent incorrect optimizations.
 806       return this;
 807     }
 808   }
 809   return cast;
 810 }
 811 
 812 
 813 void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts) {
 814   projs->fallthrough_proj      = NULL;
 815   projs->fallthrough_catchproj = NULL;
 816   projs->fallthrough_ioproj    = NULL;
 817   projs->catchall_ioproj       = NULL;
 818   projs->catchall_catchproj    = NULL;
 819   projs->fallthrough_memproj   = NULL;
 820   projs->catchall_memproj      = NULL;
 821   projs->resproj               = NULL;
 822   projs->exobj                 = NULL;
 823 
 824   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
 825     ProjNode *pn = fast_out(i)->as_Proj();
 826     if (pn->outcnt() == 0) continue;
 827     switch (pn->_con) {
 828     case TypeFunc::Control:
 829       {
 830         // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
 831         projs->fallthrough_proj = pn;
 832         DUIterator_Fast jmax, j = pn->fast_outs(jmax);
 833         const Node *cn = pn->fast_out(j);
 834         if (cn->is_Catch()) {
 835           ProjNode *cpn = NULL;
 836           for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
 837             cpn = cn->fast_out(k)->as_Proj();
 838             assert(cpn->is_CatchProj(), "must be a CatchProjNode");
 839             if (cpn->_con == CatchProjNode::fall_through_index)
 840               projs->fallthrough_catchproj = cpn;
 841             else {
 842               assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
 843               projs->catchall_catchproj = cpn;
 844             }
 845           }
 846         }
 847         break;
 848       }
 849     case TypeFunc::I_O:
 850       if (pn->_is_io_use)
 851         projs->catchall_ioproj = pn;
 852       else
 853         projs->fallthrough_ioproj = pn;
 854       for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
 855         Node* e = pn->out(j);
 856         if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
 857           assert(projs->exobj == NULL, "only one");
 858           projs->exobj = e;
 859         }
 860       }
 861       break;
 862     case TypeFunc::Memory:
 863       if (pn->_is_io_use)
 864         projs->catchall_memproj = pn;
 865       else
 866         projs->fallthrough_memproj = pn;
 867       break;
 868     case TypeFunc::Parms:
 869       projs->resproj = pn;
 870       break;
 871     default:
 872       assert(false, "unexpected projection from allocation node.");
 873     }
 874   }
 875 
 876   // The resproj may not exist because the result could be ignored
 877   // and the exception object may not exist if an exception handler
 878   // swallows the exception but all the other must exist and be found.
 879   assert(projs->fallthrough_proj      != NULL, "must be found");
 880   do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
 881   assert(!do_asserts || projs->fallthrough_catchproj != NULL, "must be found");
 882   assert(!do_asserts || projs->fallthrough_memproj   != NULL, "must be found");
 883   assert(!do_asserts || projs->fallthrough_ioproj    != NULL, "must be found");
 884   assert(!do_asserts || projs->catchall_catchproj    != NULL, "must be found");
 885   if (separate_io_proj) {
 886     assert(!do_asserts || projs->catchall_memproj    != NULL, "must be found");
 887     assert(!do_asserts || projs->catchall_ioproj     != NULL, "must be found");
 888   }
 889 }
 890 
 891 Node *CallNode::Ideal(PhaseGVN *phase, bool can_reshape) {
 892   CallGenerator* cg = generator();
 893   if (can_reshape && cg != NULL && cg->is_mh_late_inline() && !cg->already_attempted()) {
 894     // Check whether this MH handle call becomes a candidate for inlining
 895     ciMethod* callee = cg->method();
 896     vmIntrinsics::ID iid = callee->intrinsic_id();
 897     if (iid == vmIntrinsics::_invokeBasic) {
 898       if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
 899         phase->C->prepend_late_inline(cg);
 900         set_generator(NULL);
 901       }
 902     } else {
 903       assert(callee->has_member_arg(), "wrong type of call?");
 904       if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
 905         phase->C->prepend_late_inline(cg);
 906         set_generator(NULL);
 907       }
 908     }
 909   }
 910   return SafePointNode::Ideal(phase, can_reshape);
 911 }
 912 
 913 
 914 //=============================================================================
 915 uint CallJavaNode::size_of() const { return sizeof(*this); }
 916 uint CallJavaNode::cmp( const Node &n ) const {
 917   CallJavaNode &call = (CallJavaNode&)n;
 918   return CallNode::cmp(call) && _method == call._method;
 919 }
 920 #ifndef PRODUCT
 921 void CallJavaNode::dump_spec(outputStream *st) const {
 922   if( _method ) _method->print_short_name(st);
 923   CallNode::dump_spec(st);
 924 }
 925 #endif
 926 
 927 //=============================================================================
 928 uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
 929 uint CallStaticJavaNode::cmp( const Node &n ) const {
 930   CallStaticJavaNode &call = (CallStaticJavaNode&)n;
 931   return CallJavaNode::cmp(call);
 932 }
 933 
 934 //----------------------------uncommon_trap_request----------------------------
 935 // If this is an uncommon trap, return the request code, else zero.
 936 int CallStaticJavaNode::uncommon_trap_request() const {
 937   if (_name != NULL && !strcmp(_name, "uncommon_trap")) {
 938     return extract_uncommon_trap_request(this);
 939   }
 940   return 0;
 941 }
 942 int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
 943 #ifndef PRODUCT
 944   if (!(call->req() > TypeFunc::Parms &&
 945         call->in(TypeFunc::Parms) != NULL &&
 946         call->in(TypeFunc::Parms)->is_Con() &&
 947         call->in(TypeFunc::Parms)->bottom_type()->isa_int())) {
 948     assert(in_dump() != 0, "OK if dumping");
 949     tty->print("[bad uncommon trap]");
 950     return 0;
 951   }
 952 #endif
 953   return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
 954 }
 955 
 956 #ifndef PRODUCT
 957 void CallStaticJavaNode::dump_spec(outputStream *st) const {
 958   st->print("# Static ");
 959   if (_name != NULL) {
 960     st->print("%s", _name);
 961     int trap_req = uncommon_trap_request();
 962     if (trap_req != 0) {
 963       char buf[100];
 964       st->print("(%s)",
 965                  Deoptimization::format_trap_request(buf, sizeof(buf),
 966                                                      trap_req));
 967     }
 968     st->print(" ");
 969   }
 970   CallJavaNode::dump_spec(st);
 971 }
 972 #endif
 973 
 974 //=============================================================================
 975 uint CallDynamicJavaNode::size_of() const { return sizeof(*this); }
 976 uint CallDynamicJavaNode::cmp( const Node &n ) const {
 977   CallDynamicJavaNode &call = (CallDynamicJavaNode&)n;
 978   return CallJavaNode::cmp(call);
 979 }
 980 #ifndef PRODUCT
 981 void CallDynamicJavaNode::dump_spec(outputStream *st) const {
 982   st->print("# Dynamic ");
 983   CallJavaNode::dump_spec(st);
 984 }
 985 #endif
 986 
 987 //=============================================================================
 988 uint CallRuntimeNode::size_of() const { return sizeof(*this); }
 989 uint CallRuntimeNode::cmp( const Node &n ) const {
 990   CallRuntimeNode &call = (CallRuntimeNode&)n;
 991   return CallNode::cmp(call) && !strcmp(_name,call._name);
 992 }
 993 #ifndef PRODUCT
 994 void CallRuntimeNode::dump_spec(outputStream *st) const {
 995   st->print("# ");
 996   st->print("%s", _name);
 997   CallNode::dump_spec(st);
 998 }
 999 #endif
1000 
1001 //------------------------------calling_convention-----------------------------
1002 void CallRuntimeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
1003   Matcher::c_calling_convention( sig_bt, parm_regs, argcnt );
1004 }
1005 
1006 //=============================================================================
1007 //------------------------------calling_convention-----------------------------
1008 
1009 
1010 //=============================================================================
1011 bool CallLeafNode::is_call_to_arraycopystub() const {
1012   if (_name != NULL && strstr(_name, "arraycopy") != 0) {
1013     return true;
1014   }
1015   return false;
1016 }
1017 
1018 
1019 #ifndef PRODUCT
1020 void CallLeafNode::dump_spec(outputStream *st) const {
1021   st->print("# ");
1022   st->print("%s", _name);
1023   CallNode::dump_spec(st);
1024 }
1025 #endif
1026 
1027 //=============================================================================
1028 
1029 void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
1030   assert(verify_jvms(jvms), "jvms must match");
1031   int loc = jvms->locoff() + idx;
1032   if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
1033     // If current local idx is top then local idx - 1 could
1034     // be a long/double that needs to be killed since top could
1035     // represent the 2nd half ofthe long/double.
1036     uint ideal = in(loc -1)->ideal_reg();
1037     if (ideal == Op_RegD || ideal == Op_RegL) {
1038       // set other (low index) half to top
1039       set_req(loc - 1, in(loc));
1040     }
1041   }
1042   set_req(loc, c);
1043 }
1044 
1045 uint SafePointNode::size_of() const { return sizeof(*this); }
1046 uint SafePointNode::cmp( const Node &n ) const {
1047   return (&n == this);          // Always fail except on self
1048 }
1049 
1050 //-------------------------set_next_exception----------------------------------
1051 void SafePointNode::set_next_exception(SafePointNode* n) {
1052   assert(n == NULL || n->Opcode() == Op_SafePoint, "correct value for next_exception");
1053   if (len() == req()) {
1054     if (n != NULL)  add_prec(n);
1055   } else {
1056     set_prec(req(), n);
1057   }
1058 }
1059 
1060 
1061 //----------------------------next_exception-----------------------------------
1062 SafePointNode* SafePointNode::next_exception() const {
1063   if (len() == req()) {
1064     return NULL;
1065   } else {
1066     Node* n = in(req());
1067     assert(n == NULL || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
1068     return (SafePointNode*) n;
1069   }
1070 }
1071 
1072 
1073 //------------------------------Ideal------------------------------------------
1074 // Skip over any collapsed Regions
1075 Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1076   return remove_dead_region(phase, can_reshape) ? this : NULL;
1077 }
1078 
1079 //------------------------------Identity---------------------------------------
1080 // Remove obviously duplicate safepoints
1081 Node *SafePointNode::Identity( PhaseTransform *phase ) {
1082 
1083   // If you have back to back safepoints, remove one
1084   if( in(TypeFunc::Control)->is_SafePoint() )
1085     return in(TypeFunc::Control);
1086 
1087   if( in(0)->is_Proj() ) {
1088     Node *n0 = in(0)->in(0);
1089     // Check if he is a call projection (except Leaf Call)
1090     if( n0->is_Catch() ) {
1091       n0 = n0->in(0)->in(0);
1092       assert( n0->is_Call(), "expect a call here" );
1093     }
1094     if( n0->is_Call() && n0->as_Call()->guaranteed_safepoint() ) {
1095       // Useless Safepoint, so remove it
1096       return in(TypeFunc::Control);
1097     }
1098   }
1099 
1100   return this;
1101 }
1102 
1103 //------------------------------Value------------------------------------------
1104 const Type *SafePointNode::Value( PhaseTransform *phase ) const {
1105   if( phase->type(in(0)) == Type::TOP ) return Type::TOP;
1106   if( phase->eqv( in(0), this ) ) return Type::TOP; // Dead infinite loop
1107   return Type::CONTROL;
1108 }
1109 
1110 #ifndef PRODUCT
1111 void SafePointNode::dump_spec(outputStream *st) const {
1112   st->print(" SafePoint ");
1113   _replaced_nodes.dump(st);
1114 }
1115 #endif
1116 
1117 const RegMask &SafePointNode::in_RegMask(uint idx) const {
1118   if( idx < TypeFunc::Parms ) return RegMask::Empty;
1119   // Values outside the domain represent debug info
1120   return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
1121 }
1122 const RegMask &SafePointNode::out_RegMask() const {
1123   return RegMask::Empty;
1124 }
1125 
1126 
1127 void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) {
1128   assert((int)grow_by > 0, "sanity");
1129   int monoff = jvms->monoff();
1130   int scloff = jvms->scloff();
1131   int endoff = jvms->endoff();
1132   assert(endoff == (int)req(), "no other states or debug info after me");
1133   Node* top = Compile::current()->top();
1134   for (uint i = 0; i < grow_by; i++) {
1135     ins_req(monoff, top);
1136   }
1137   jvms->set_monoff(monoff + grow_by);
1138   jvms->set_scloff(scloff + grow_by);
1139   jvms->set_endoff(endoff + grow_by);
1140 }
1141 
1142 void SafePointNode::push_monitor(const FastLockNode *lock) {
1143   // Add a LockNode, which points to both the original BoxLockNode (the
1144   // stack space for the monitor) and the Object being locked.
1145   const int MonitorEdges = 2;
1146   assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
1147   assert(req() == jvms()->endoff(), "correct sizing");
1148   int nextmon = jvms()->scloff();
1149   if (GenerateSynchronizationCode) {
1150     ins_req(nextmon,   lock->box_node());
1151     ins_req(nextmon+1, lock->obj_node());
1152   } else {
1153     Node* top = Compile::current()->top();
1154     ins_req(nextmon, top);
1155     ins_req(nextmon, top);
1156   }
1157   jvms()->set_scloff(nextmon + MonitorEdges);
1158   jvms()->set_endoff(req());
1159 }
1160 
1161 void SafePointNode::pop_monitor() {
1162   // Delete last monitor from debug info
1163   debug_only(int num_before_pop = jvms()->nof_monitors());
1164   const int MonitorEdges = 2;
1165   assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
1166   int scloff = jvms()->scloff();
1167   int endoff = jvms()->endoff();
1168   int new_scloff = scloff - MonitorEdges;
1169   int new_endoff = endoff - MonitorEdges;
1170   jvms()->set_scloff(new_scloff);
1171   jvms()->set_endoff(new_endoff);
1172   while (scloff > new_scloff)  del_req_ordered(--scloff);
1173   assert(jvms()->nof_monitors() == num_before_pop-1, "");
1174 }
1175 
1176 Node *SafePointNode::peek_monitor_box() const {
1177   int mon = jvms()->nof_monitors() - 1;
1178   assert(mon >= 0, "most have a monitor");
1179   return monitor_box(jvms(), mon);
1180 }
1181 
1182 Node *SafePointNode::peek_monitor_obj() const {
1183   int mon = jvms()->nof_monitors() - 1;
1184   assert(mon >= 0, "most have a monitor");
1185   return monitor_obj(jvms(), mon);
1186 }
1187 
1188 // Do we Match on this edge index or not?  Match no edges
1189 uint SafePointNode::match_edge(uint idx) const {
1190   if( !needs_polling_address_input() )
1191     return 0;
1192 
1193   return (TypeFunc::Parms == idx);
1194 }
1195 
1196 //==============  SafePointScalarObjectNode  ==============
1197 
1198 SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp,
1199 #ifdef ASSERT
1200                                                      AllocateNode* alloc,
1201 #endif
1202                                                      uint first_index,
1203                                                      uint n_fields) :
1204   TypeNode(tp, 1), // 1 control input -- seems required.  Get from root.
1205 #ifdef ASSERT
1206   _alloc(alloc),
1207 #endif
1208   _first_index(first_index),
1209   _n_fields(n_fields)
1210 {
1211   init_class_id(Class_SafePointScalarObject);
1212 }
1213 
1214 // Do not allow value-numbering for SafePointScalarObject node.
1215 uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
1216 uint SafePointScalarObjectNode::cmp( const Node &n ) const {
1217   return (&n == this); // Always fail except on self
1218 }
1219 
1220 uint SafePointScalarObjectNode::ideal_reg() const {
1221   return 0; // No matching to machine instruction
1222 }
1223 
1224 const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
1225   return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
1226 }
1227 
1228 const RegMask &SafePointScalarObjectNode::out_RegMask() const {
1229   return RegMask::Empty;
1230 }
1231 
1232 uint SafePointScalarObjectNode::match_edge(uint idx) const {
1233   return 0;
1234 }
1235 
1236 SafePointScalarObjectNode*
1237 SafePointScalarObjectNode::clone(Dict* sosn_map) const {
1238   void* cached = (*sosn_map)[(void*)this];
1239   if (cached != NULL) {
1240     return (SafePointScalarObjectNode*)cached;
1241   }
1242   SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
1243   sosn_map->Insert((void*)this, (void*)res);
1244   return res;
1245 }
1246 
1247 
1248 #ifndef PRODUCT
1249 void SafePointScalarObjectNode::dump_spec(outputStream *st) const {
1250   st->print(" # fields@[%d..%d]", first_index(),
1251              first_index() + n_fields() - 1);
1252 }
1253 
1254 #endif
1255 
1256 //=============================================================================
1257 uint AllocateNode::size_of() const { return sizeof(*this); }
1258 
1259 AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
1260                            Node *ctrl, Node *mem, Node *abio,
1261                            Node *size, Node *klass_node, Node *initial_test)
1262   : CallNode(atype, NULL, TypeRawPtr::BOTTOM)
1263 {
1264   init_class_id(Class_Allocate);
1265   init_flags(Flag_is_macro);
1266   _is_scalar_replaceable = false;
1267   _is_non_escaping = false;
1268   Node *topnode = C->top();
1269 
1270   init_req( TypeFunc::Control  , ctrl );
1271   init_req( TypeFunc::I_O      , abio );
1272   init_req( TypeFunc::Memory   , mem );
1273   init_req( TypeFunc::ReturnAdr, topnode );
1274   init_req( TypeFunc::FramePtr , topnode );
1275   init_req( AllocSize          , size);
1276   init_req( KlassNode          , klass_node);
1277   init_req( InitialTest        , initial_test);
1278   init_req( ALength            , topnode);
1279   C->add_macro_node(this);
1280 }
1281 
1282 //=============================================================================
1283 Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1284   if (remove_dead_region(phase, can_reshape))  return this;
1285   // Don't bother trying to transform a dead node
1286   if (in(0) && in(0)->is_top())  return NULL;
1287 
1288   const Type* type = phase->type(Ideal_length());
1289   if (type->isa_int() && type->is_int()->_hi < 0) {
1290     if (can_reshape) {
1291       PhaseIterGVN *igvn = phase->is_IterGVN();
1292       // Unreachable fall through path (negative array length),
1293       // the allocation can only throw so disconnect it.
1294       Node* proj = proj_out(TypeFunc::Control);
1295       Node* catchproj = NULL;
1296       if (proj != NULL) {
1297         for (DUIterator_Fast imax, i = proj->fast_outs(imax); i < imax; i++) {
1298           Node *cn = proj->fast_out(i);
1299           if (cn->is_Catch()) {
1300             catchproj = cn->as_Multi()->proj_out(CatchProjNode::fall_through_index);
1301             break;
1302           }
1303         }
1304       }
1305       if (catchproj != NULL && catchproj->outcnt() > 0 &&
1306           (catchproj->outcnt() > 1 ||
1307            catchproj->unique_out()->Opcode() != Op_Halt)) {
1308         assert(catchproj->is_CatchProj(), "must be a CatchProjNode");
1309         Node* nproj = catchproj->clone();
1310         igvn->register_new_node_with_optimizer(nproj);
1311 
1312         Node *frame = new ParmNode( phase->C->start(), TypeFunc::FramePtr );
1313         frame = phase->transform(frame);
1314         // Halt & Catch Fire
1315         Node *halt = new HaltNode( nproj, frame );
1316         phase->C->root()->add_req(halt);
1317         phase->transform(halt);
1318 
1319         igvn->replace_node(catchproj, phase->C->top());
1320         return this;
1321       }
1322     } else {
1323       // Can't correct it during regular GVN so register for IGVN
1324       phase->C->record_for_igvn(this);
1325     }
1326   }
1327   return NULL;
1328 }
1329 
1330 // Retrieve the length from the AllocateArrayNode. Narrow the type with a
1331 // CastII, if appropriate.  If we are not allowed to create new nodes, and
1332 // a CastII is appropriate, return NULL.
1333 Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTransform *phase, bool allow_new_nodes) {
1334   Node *length = in(AllocateNode::ALength);
1335   assert(length != NULL, "length is not null");
1336 
1337   const TypeInt* length_type = phase->find_int_type(length);
1338   const TypeAryPtr* ary_type = oop_type->isa_aryptr();
1339 
1340   if (ary_type != NULL && length_type != NULL) {
1341     const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
1342     if (narrow_length_type != length_type) {
1343       // Assert one of:
1344       //   - the narrow_length is 0
1345       //   - the narrow_length is not wider than length
1346       assert(narrow_length_type == TypeInt::ZERO ||
1347              length_type->is_con() && narrow_length_type->is_con() &&
1348                 (narrow_length_type->_hi <= length_type->_lo) ||
1349              (narrow_length_type->_hi <= length_type->_hi &&
1350               narrow_length_type->_lo >= length_type->_lo),
1351              "narrow type must be narrower than length type");
1352 
1353       // Return NULL if new nodes are not allowed
1354       if (!allow_new_nodes) return NULL;
1355       // Create a cast which is control dependent on the initialization to
1356       // propagate the fact that the array length must be positive.
1357       length = new CastIINode(length, narrow_length_type);
1358       length->set_req(0, initialization()->proj_out(0));
1359     }
1360   }
1361 
1362   return length;
1363 }
1364 
1365 //=============================================================================
1366 uint LockNode::size_of() const { return sizeof(*this); }
1367 
1368 // Redundant lock elimination
1369 //
1370 // There are various patterns of locking where we release and
1371 // immediately reacquire a lock in a piece of code where no operations
1372 // occur in between that would be observable.  In those cases we can
1373 // skip releasing and reacquiring the lock without violating any
1374 // fairness requirements.  Doing this around a loop could cause a lock
1375 // to be held for a very long time so we concentrate on non-looping
1376 // control flow.  We also require that the operations are fully
1377 // redundant meaning that we don't introduce new lock operations on
1378 // some paths so to be able to eliminate it on others ala PRE.  This
1379 // would probably require some more extensive graph manipulation to
1380 // guarantee that the memory edges were all handled correctly.
1381 //
1382 // Assuming p is a simple predicate which can't trap in any way and s
1383 // is a synchronized method consider this code:
1384 //
1385 //   s();
1386 //   if (p)
1387 //     s();
1388 //   else
1389 //     s();
1390 //   s();
1391 //
1392 // 1. The unlocks of the first call to s can be eliminated if the
1393 // locks inside the then and else branches are eliminated.
1394 //
1395 // 2. The unlocks of the then and else branches can be eliminated if
1396 // the lock of the final call to s is eliminated.
1397 //
1398 // Either of these cases subsumes the simple case of sequential control flow
1399 //
1400 // Addtionally we can eliminate versions without the else case:
1401 //
1402 //   s();
1403 //   if (p)
1404 //     s();
1405 //   s();
1406 //
1407 // 3. In this case we eliminate the unlock of the first s, the lock
1408 // and unlock in the then case and the lock in the final s.
1409 //
1410 // Note also that in all these cases the then/else pieces don't have
1411 // to be trivial as long as they begin and end with synchronization
1412 // operations.
1413 //
1414 //   s();
1415 //   if (p)
1416 //     s();
1417 //     f();
1418 //     s();
1419 //   s();
1420 //
1421 // The code will work properly for this case, leaving in the unlock
1422 // before the call to f and the relock after it.
1423 //
1424 // A potentially interesting case which isn't handled here is when the
1425 // locking is partially redundant.
1426 //
1427 //   s();
1428 //   if (p)
1429 //     s();
1430 //
1431 // This could be eliminated putting unlocking on the else case and
1432 // eliminating the first unlock and the lock in the then side.
1433 // Alternatively the unlock could be moved out of the then side so it
1434 // was after the merge and the first unlock and second lock
1435 // eliminated.  This might require less manipulation of the memory
1436 // state to get correct.
1437 //
1438 // Additionally we might allow work between a unlock and lock before
1439 // giving up eliminating the locks.  The current code disallows any
1440 // conditional control flow between these operations.  A formulation
1441 // similar to partial redundancy elimination computing the
1442 // availability of unlocking and the anticipatability of locking at a
1443 // program point would allow detection of fully redundant locking with
1444 // some amount of work in between.  I'm not sure how often I really
1445 // think that would occur though.  Most of the cases I've seen
1446 // indicate it's likely non-trivial work would occur in between.
1447 // There may be other more complicated constructs where we could
1448 // eliminate locking but I haven't seen any others appear as hot or
1449 // interesting.
1450 //
1451 // Locking and unlocking have a canonical form in ideal that looks
1452 // roughly like this:
1453 //
1454 //              <obj>
1455 //                | \\------+
1456 //                |  \       \
1457 //                | BoxLock   \
1458 //                |  |   |     \
1459 //                |  |    \     \
1460 //                |  |   FastLock
1461 //                |  |   /
1462 //                |  |  /
1463 //                |  |  |
1464 //
1465 //               Lock
1466 //                |
1467 //            Proj #0
1468 //                |
1469 //            MembarAcquire
1470 //                |
1471 //            Proj #0
1472 //
1473 //            MembarRelease
1474 //                |
1475 //            Proj #0
1476 //                |
1477 //              Unlock
1478 //                |
1479 //            Proj #0
1480 //
1481 //
1482 // This code proceeds by processing Lock nodes during PhaseIterGVN
1483 // and searching back through its control for the proper code
1484 // patterns.  Once it finds a set of lock and unlock operations to
1485 // eliminate they are marked as eliminatable which causes the
1486 // expansion of the Lock and Unlock macro nodes to make the operation a NOP
1487 //
1488 //=============================================================================
1489 
1490 //
1491 // Utility function to skip over uninteresting control nodes.  Nodes skipped are:
1492 //   - copy regions.  (These may not have been optimized away yet.)
1493 //   - eliminated locking nodes
1494 //
1495 static Node *next_control(Node *ctrl) {
1496   if (ctrl == NULL)
1497     return NULL;
1498   while (1) {
1499     if (ctrl->is_Region()) {
1500       RegionNode *r = ctrl->as_Region();
1501       Node *n = r->is_copy();
1502       if (n == NULL)
1503         break;  // hit a region, return it
1504       else
1505         ctrl = n;
1506     } else if (ctrl->is_Proj()) {
1507       Node *in0 = ctrl->in(0);
1508       if (in0->is_AbstractLock() && in0->as_AbstractLock()->is_eliminated()) {
1509         ctrl = in0->in(0);
1510       } else {
1511         break;
1512       }
1513     } else {
1514       break; // found an interesting control
1515     }
1516   }
1517   return ctrl;
1518 }
1519 //
1520 // Given a control, see if it's the control projection of an Unlock which
1521 // operating on the same object as lock.
1522 //
1523 bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
1524                                             GrowableArray<AbstractLockNode*> &lock_ops) {
1525   ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : NULL;
1526   if (ctrl_proj != NULL && ctrl_proj->_con == TypeFunc::Control) {
1527     Node *n = ctrl_proj->in(0);
1528     if (n != NULL && n->is_Unlock()) {
1529       UnlockNode *unlock = n->as_Unlock();
1530       if (lock->obj_node()->eqv_uncast(unlock->obj_node()) &&
1531           BoxLockNode::same_slot(lock->box_node(), unlock->box_node()) &&
1532           !unlock->is_eliminated()) {
1533         lock_ops.append(unlock);
1534         return true;
1535       }
1536     }
1537   }
1538   return false;
1539 }
1540 
1541 //
1542 // Find the lock matching an unlock.  Returns null if a safepoint
1543 // or complicated control is encountered first.
1544 LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
1545   LockNode *lock_result = NULL;
1546   // find the matching lock, or an intervening safepoint
1547   Node *ctrl = next_control(unlock->in(0));
1548   while (1) {
1549     assert(ctrl != NULL, "invalid control graph");
1550     assert(!ctrl->is_Start(), "missing lock for unlock");
1551     if (ctrl->is_top()) break;  // dead control path
1552     if (ctrl->is_Proj()) ctrl = ctrl->in(0);
1553     if (ctrl->is_SafePoint()) {
1554         break;  // found a safepoint (may be the lock we are searching for)
1555     } else if (ctrl->is_Region()) {
1556       // Check for a simple diamond pattern.  Punt on anything more complicated
1557       if (ctrl->req() == 3 && ctrl->in(1) != NULL && ctrl->in(2) != NULL) {
1558         Node *in1 = next_control(ctrl->in(1));
1559         Node *in2 = next_control(ctrl->in(2));
1560         if (((in1->is_IfTrue() && in2->is_IfFalse()) ||
1561              (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) {
1562           ctrl = next_control(in1->in(0)->in(0));
1563         } else {
1564           break;
1565         }
1566       } else {
1567         break;
1568       }
1569     } else {
1570       ctrl = next_control(ctrl->in(0));  // keep searching
1571     }
1572   }
1573   if (ctrl->is_Lock()) {
1574     LockNode *lock = ctrl->as_Lock();
1575     if (lock->obj_node()->eqv_uncast(unlock->obj_node()) &&
1576         BoxLockNode::same_slot(lock->box_node(), unlock->box_node())) {
1577       lock_result = lock;
1578     }
1579   }
1580   return lock_result;
1581 }
1582 
1583 // This code corresponds to case 3 above.
1584 
1585 bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock,
1586                                                        GrowableArray<AbstractLockNode*> &lock_ops) {
1587   Node* if_node = node->in(0);
1588   bool  if_true = node->is_IfTrue();
1589 
1590   if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) {
1591     Node *lock_ctrl = next_control(if_node->in(0));
1592     if (find_matching_unlock(lock_ctrl, lock, lock_ops)) {
1593       Node* lock1_node = NULL;
1594       ProjNode* proj = if_node->as_If()->proj_out(!if_true);
1595       if (if_true) {
1596         if (proj->is_IfFalse() && proj->outcnt() == 1) {
1597           lock1_node = proj->unique_out();
1598         }
1599       } else {
1600         if (proj->is_IfTrue() && proj->outcnt() == 1) {
1601           lock1_node = proj->unique_out();
1602         }
1603       }
1604       if (lock1_node != NULL && lock1_node->is_Lock()) {
1605         LockNode *lock1 = lock1_node->as_Lock();
1606         if (lock->obj_node()->eqv_uncast(lock1->obj_node()) &&
1607             BoxLockNode::same_slot(lock->box_node(), lock1->box_node()) &&
1608             !lock1->is_eliminated()) {
1609           lock_ops.append(lock1);
1610           return true;
1611         }
1612       }
1613     }
1614   }
1615 
1616   lock_ops.trunc_to(0);
1617   return false;
1618 }
1619 
1620 bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock,
1621                                GrowableArray<AbstractLockNode*> &lock_ops) {
1622   // check each control merging at this point for a matching unlock.
1623   // in(0) should be self edge so skip it.
1624   for (int i = 1; i < (int)region->req(); i++) {
1625     Node *in_node = next_control(region->in(i));
1626     if (in_node != NULL) {
1627       if (find_matching_unlock(in_node, lock, lock_ops)) {
1628         // found a match so keep on checking.
1629         continue;
1630       } else if (find_lock_and_unlock_through_if(in_node, lock, lock_ops)) {
1631         continue;
1632       }
1633 
1634       // If we fall through to here then it was some kind of node we
1635       // don't understand or there wasn't a matching unlock, so give
1636       // up trying to merge locks.
1637       lock_ops.trunc_to(0);
1638       return false;
1639     }
1640   }
1641   return true;
1642 
1643 }
1644 
1645 #ifndef PRODUCT
1646 //
1647 // Create a counter which counts the number of times this lock is acquired
1648 //
1649 void AbstractLockNode::create_lock_counter(JVMState* state) {
1650   _counter = OptoRuntime::new_named_counter(state, NamedCounter::LockCounter);
1651 }
1652 
1653 void AbstractLockNode::set_eliminated_lock_counter() {
1654   if (_counter) {
1655     // Update the counter to indicate that this lock was eliminated.
1656     // The counter update code will stay around even though the
1657     // optimizer will eliminate the lock operation itself.
1658     _counter->set_tag(NamedCounter::EliminatedLockCounter);
1659   }
1660 }
1661 #endif
1662 
1663 //=============================================================================
1664 Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1665 
1666   // perform any generic optimizations first (returns 'this' or NULL)
1667   Node *result = SafePointNode::Ideal(phase, can_reshape);
1668   if (result != NULL)  return result;
1669   // Don't bother trying to transform a dead node
1670   if (in(0) && in(0)->is_top())  return NULL;
1671 
1672   // Now see if we can optimize away this lock.  We don't actually
1673   // remove the locking here, we simply set the _eliminate flag which
1674   // prevents macro expansion from expanding the lock.  Since we don't
1675   // modify the graph, the value returned from this function is the
1676   // one computed above.
1677   if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
1678     //
1679     // If we are locking an unescaped object, the lock/unlock is unnecessary
1680     //
1681     ConnectionGraph *cgr = phase->C->congraph();
1682     if (cgr != NULL && cgr->not_global_escape(obj_node())) {
1683       assert(!is_eliminated() || is_coarsened(), "sanity");
1684       // The lock could be marked eliminated by lock coarsening
1685       // code during first IGVN before EA. Replace coarsened flag
1686       // to eliminate all associated locks/unlocks.
1687 #ifdef ASSERT
1688       this->log_lock_optimization(phase->C,"eliminate_lock_set_non_esc1");
1689 #endif
1690       this->set_non_esc_obj();
1691       return result;
1692     }
1693 
1694     //
1695     // Try lock coarsening
1696     //
1697     PhaseIterGVN* iter = phase->is_IterGVN();
1698     if (iter != NULL && !is_eliminated()) {
1699 
1700       GrowableArray<AbstractLockNode*>   lock_ops;
1701 
1702       Node *ctrl = next_control(in(0));
1703 
1704       // now search back for a matching Unlock
1705       if (find_matching_unlock(ctrl, this, lock_ops)) {
1706         // found an unlock directly preceding this lock.  This is the
1707         // case of single unlock directly control dependent on a
1708         // single lock which is the trivial version of case 1 or 2.
1709       } else if (ctrl->is_Region() ) {
1710         if (find_unlocks_for_region(ctrl->as_Region(), this, lock_ops)) {
1711         // found lock preceded by multiple unlocks along all paths
1712         // joining at this point which is case 3 in description above.
1713         }
1714       } else {
1715         // see if this lock comes from either half of an if and the
1716         // predecessors merges unlocks and the other half of the if
1717         // performs a lock.
1718         if (find_lock_and_unlock_through_if(ctrl, this, lock_ops)) {
1719           // found unlock splitting to an if with locks on both branches.
1720         }
1721       }
1722 
1723       if (lock_ops.length() > 0) {
1724         // add ourselves to the list of locks to be eliminated.
1725         lock_ops.append(this);
1726 
1727   #ifndef PRODUCT
1728         if (PrintEliminateLocks) {
1729           int locks = 0;
1730           int unlocks = 0;
1731           for (int i = 0; i < lock_ops.length(); i++) {
1732             AbstractLockNode* lock = lock_ops.at(i);
1733             if (lock->Opcode() == Op_Lock)
1734               locks++;
1735             else
1736               unlocks++;
1737             if (Verbose) {
1738               lock->dump(1);
1739             }
1740           }
1741           tty->print_cr("***Eliminated %d unlocks and %d locks", unlocks, locks);
1742         }
1743   #endif
1744 
1745         // for each of the identified locks, mark them
1746         // as eliminatable
1747         for (int i = 0; i < lock_ops.length(); i++) {
1748           AbstractLockNode* lock = lock_ops.at(i);
1749 
1750           // Mark it eliminated by coarsening and update any counters
1751 #ifdef ASSERT
1752           lock->log_lock_optimization(phase->C, "eliminate_lock_set_coarsened");
1753 #endif
1754           lock->set_coarsened();
1755         }
1756       } else if (ctrl->is_Region() &&
1757                  iter->_worklist.member(ctrl)) {
1758         // We weren't able to find any opportunities but the region this
1759         // lock is control dependent on hasn't been processed yet so put
1760         // this lock back on the worklist so we can check again once any
1761         // region simplification has occurred.
1762         iter->_worklist.push(this);
1763       }
1764     }
1765   }
1766 
1767   return result;
1768 }
1769 
1770 //=============================================================================
1771 bool LockNode::is_nested_lock_region() {
1772   return is_nested_lock_region(NULL);
1773 }
1774 
1775 // p is used for access to compilation log; no logging if NULL
1776 bool LockNode::is_nested_lock_region(Compile * c) {
1777   BoxLockNode* box = box_node()->as_BoxLock();
1778   int stk_slot = box->stack_slot();
1779   if (stk_slot <= 0) {
1780 #ifdef ASSERT
1781     this->log_lock_optimization(c, "eliminate_lock_INLR_1");
1782 #endif
1783     return false; // External lock or it is not Box (Phi node).
1784   }
1785 
1786   // Ignore complex cases: merged locks or multiple locks.
1787   Node* obj = obj_node();
1788   LockNode* unique_lock = NULL;
1789   if (!box->is_simple_lock_region(&unique_lock, obj)) {
1790 #ifdef ASSERT
1791     this->log_lock_optimization(c, "eliminate_lock_INLR_2a");
1792 #endif
1793     return false;
1794   }
1795   if (unique_lock != this) {
1796 #ifdef ASSERT
1797     this->log_lock_optimization(c, "eliminate_lock_INLR_2b");
1798 #endif
1799     return false;
1800   }
1801 
1802   // Look for external lock for the same object.
1803   SafePointNode* sfn = this->as_SafePoint();
1804   JVMState* youngest_jvms = sfn->jvms();
1805   int max_depth = youngest_jvms->depth();
1806   for (int depth = 1; depth <= max_depth; depth++) {
1807     JVMState* jvms = youngest_jvms->of_depth(depth);
1808     int num_mon  = jvms->nof_monitors();
1809     // Loop over monitors
1810     for (int idx = 0; idx < num_mon; idx++) {
1811       Node* obj_node = sfn->monitor_obj(jvms, idx);
1812       BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock();
1813       if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) {
1814         return true;
1815       }
1816     }
1817   }
1818 #ifdef ASSERT
1819   this->log_lock_optimization(c, "eliminate_lock_INLR_3");
1820 #endif
1821   return false;
1822 }
1823 
1824 //=============================================================================
1825 uint UnlockNode::size_of() const { return sizeof(*this); }
1826 
1827 //=============================================================================
1828 Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1829 
1830   // perform any generic optimizations first (returns 'this' or NULL)
1831   Node *result = SafePointNode::Ideal(phase, can_reshape);
1832   if (result != NULL)  return result;
1833   // Don't bother trying to transform a dead node
1834   if (in(0) && in(0)->is_top())  return NULL;
1835 
1836   // Now see if we can optimize away this unlock.  We don't actually
1837   // remove the unlocking here, we simply set the _eliminate flag which
1838   // prevents macro expansion from expanding the unlock.  Since we don't
1839   // modify the graph, the value returned from this function is the
1840   // one computed above.
1841   // Escape state is defined after Parse phase.
1842   if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
1843     //
1844     // If we are unlocking an unescaped object, the lock/unlock is unnecessary.
1845     //
1846     ConnectionGraph *cgr = phase->C->congraph();
1847     if (cgr != NULL && cgr->not_global_escape(obj_node())) {
1848       assert(!is_eliminated() || is_coarsened(), "sanity");
1849       // The lock could be marked eliminated by lock coarsening
1850       // code during first IGVN before EA. Replace coarsened flag
1851       // to eliminate all associated locks/unlocks.
1852 #ifdef ASSERT
1853       this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc2");
1854 #endif
1855       this->set_non_esc_obj();
1856     }
1857   }
1858   return result;
1859 }
1860 
1861 const char * AbstractLockNode::kind_as_string() const {
1862   return is_coarsened()   ? "coarsened" :
1863          is_nested()      ? "nested" :
1864          is_non_esc_obj() ? "non_escaping" :
1865          "?";
1866 }
1867 
1868 void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag)  const {
1869   if (C == NULL) {
1870     return;
1871   }
1872   CompileLog* log = C->log();
1873   if (log != NULL) {
1874     log->begin_head("%s lock='%d' compile_id='%d' class_id='%s' kind='%s'",
1875           tag, is_Lock(), C->compile_id(),
1876           is_Unlock() ? "unlock" : is_Lock() ? "lock" : "?",
1877           kind_as_string());
1878     log->stamp();
1879     log->end_head();
1880     JVMState* p = is_Unlock() ? (as_Unlock()->dbg_jvms()) : jvms();
1881     while (p != NULL) {
1882       log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
1883       p = p->caller();
1884     }
1885     log->tail(tag);
1886   }
1887 }
1888 
1889 bool CallNode::may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr *t_oop, PhaseTransform *phase) {
1890   if (dest_t->is_known_instance() && t_oop->is_known_instance()) {
1891     return dest_t->instance_id() == t_oop->instance_id();
1892   }
1893 
1894   if (dest_t->isa_instptr() && !dest_t->klass()->equals(phase->C->env()->Object_klass())) {
1895     // clone
1896     if (t_oop->isa_aryptr()) {
1897       return false;
1898     }
1899     if (!t_oop->isa_instptr()) {
1900       return true;
1901     }
1902     if (dest_t->klass()->is_subtype_of(t_oop->klass()) || t_oop->klass()->is_subtype_of(dest_t->klass())) {
1903       return true;
1904     }
1905     // unrelated
1906     return false;
1907   }
1908 
1909   if (dest_t->isa_aryptr()) {
1910     // arraycopy or array clone
1911     if (t_oop->isa_instptr()) {
1912       return false;
1913     }
1914     if (!t_oop->isa_aryptr()) {
1915       return true;
1916     }
1917 
1918     const Type* elem = dest_t->is_aryptr()->elem();
1919     if (elem == Type::BOTTOM) {
1920       // An array but we don't know what elements are
1921       return true;
1922     }
1923 
1924     dest_t = dest_t->add_offset(Type::OffsetBot)->is_oopptr();
1925     uint dest_alias = phase->C->get_alias_index(dest_t);
1926     uint t_oop_alias = phase->C->get_alias_index(t_oop);
1927 
1928     return dest_alias == t_oop_alias;
1929   }
1930 
1931   return true;
1932 }
1933 
1934 bool CallLeafNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
1935   if (is_call_to_arraycopystub()) {
1936     const TypeTuple* args = _tf->domain();
1937     Node* dest = NULL;
1938     // Stubs that can be called once an ArrayCopyNode is expanded have
1939     // different signatures. Look for the second pointer argument,
1940     // that is the destination of the copy.
1941     for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
1942       if (args->field_at(i)->isa_ptr()) {
1943         j++;
1944         if (j == 2) {
1945           dest = in(i);
1946           break;
1947         }
1948       }
1949     }
1950     if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
1951       return true;
1952     }
1953     return false;
1954   }
1955   return CallNode::may_modify(t_oop, phase);
1956 }