/*
 * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class Chaitin;
class NamedCounter;
class MultiNode;
class  SafePointNode;
class   CallNode;
class     CallJavaNode;
class       CallStaticJavaNode;
class       CallDynamicJavaNode;
class     CallRuntimeNode;
class       CallLeafNode;
class         CallLeafNoFPNode;
class     AllocateNode;
class       AllocateArrayNode;
class     LockNode;
class     UnlockNode;
class JVMState;
class OopMap;
class State;
class StartNode;
class MachCallNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_flags(Flag_is_block_start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; }
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int   Opcode() const;
  static  const TypeTuple *osr_domain();
};


//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req() const;
#endif
};


//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site.  Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode.  Restores registers and
// unwinds stack.  Rethrow happens in the caller's method.
class RethrowNode : public Node {
 public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req() const;
#endif
};


//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit.  (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
class JVMState : public ResourceObj {
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false       -- do not reexecute
    Reexecute_True      =  1  // true        -- reexecute the bytecode
  } ReexecuteState; // Reexecute State

private:
  JVMState*         _caller;    // List pointer for forming scope chains
  uint              _depth;     // One more than caller depth, or one.
  uint              _locoff;    // Offset to locals in input edge mapping
  uint              _stkoff;    // Offset to stack in input edge mapping
  uint              _monoff;    // Offset to monitors in input edge mapping
  uint              _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint              _endoff;    // Offset to end of input edge mapping
  uint              _sp;        // Java Expression Stack Pointer for this state
  int               _bci;       // Byte Code Index of this JVM point
  ReexecuteState    _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*         _method;    // Method Pointer
  SafePointNode*    _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size);  // root state; has a null method

  // Access functions for the JVM
  uint              locoff() const { return _locoff; }
  uint              stkoff() const { return _stkoff; }
  uint              argoff() const { return _stkoff + _sp; }
  uint              monoff() const { return _monoff; }
  uint              scloff() const { return _scloff; }
  uint              endoff() const { return _endoff; }
  uint              oopoff() const { return debug_end(); }

  int            loc_size() const { return _stkoff - _locoff; }
  int            stk_size() const { return _monoff - _stkoff; }
  int            mon_size() const { return _scloff - _monoff; }
  int            scl_size() const { return _endoff - _scloff; }

  bool        is_loc(uint i) const { return i >= _locoff && i < _stkoff; }
  bool        is_stk(uint i) const { return i >= _stkoff && i < _monoff; }
  bool        is_mon(uint i) const { return i >= _monoff && i < _scloff; }
  bool        is_scl(uint i) const { return i >= _scloff && i < _endoff; }

  uint                      sp() const { return _sp; }
  int                      bci() const { return _bci; }
  bool        should_reexecute() const { return _reexecute==Reexecute_True; }
  bool  is_reexecute_undefined() const { return _reexecute==Reexecute_Undefined; }
  bool              has_method() const { return _method != NULL; }
  ciMethod*             method() const { assert(has_method(), ""); return _method; }
  JVMState*             caller() const { return _caller; }
  SafePointNode*           map() const { return _map; }
  uint                   depth() const { return _depth; }
  uint             debug_start() const; // returns locoff of root caller
  uint               debug_end() const; // returns endoff of self
  uint              debug_size() const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint        debug_depth()  const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                             || (caller() && caller()->is_monitor_use(off)); }
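
  // The offsets above index into the input edges of the SafePointNode that
  // owns this state.  A minimal sketch (local names are assumptions, not part
  // of this interface) of visiting each (box, obj) pair at this state:
  //
  //   const JVMState* jvms = map->jvms();
  //   for (int i = 0; i < jvms->nof_monitors(); i++) {
  //     Node* box = map->in(jvms->monitor_box_offset(i));  // BoxLockNode edge
  //     Node* obj = map->in(jvms->monitor_obj_offset(i));  // locked object edge
  //     // ... inspect or rewrite the monitor pair ...
  //   }
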
  // Initialization functions for the JVM
  void              set_locoff(uint off) { _locoff = off; }
  void              set_stkoff(uint off) { _stkoff = off; }
  void              set_monoff(uint off) { _monoff = off; }
  void              set_scloff(uint off) { _scloff = off; }
  void              set_endoff(uint off) { _endoff = off; }
  void              set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void              set_map(SafePointNode *map) { _map = map; }
  void              set_sp(uint sp) { _sp = sp; }
                    // _reexecute is initialized to "undefined" for a new bci
  void              set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
  void              set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller

#ifndef PRODUCT
  void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void      dump_spec(outputStream *st) const;
  void      dump_on(outputStream* st) const;
  void      dump() const {
    dump_on(tty);
  }
#endif
};
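
// Because inlining links JVMStates through their caller pointers, the full
// inline stack at a safepoint can be recovered by walking the chain from the
// innermost frame outward.  A hedged sketch (variable names are assumptions):
//
//   for (const JVMState* jvms = map->jvms(); jvms != NULL; jvms = jvms->caller()) {
//     if (jvms->has_method()) {
//       tty->print_cr("  @ bci %d in %s", jvms->bci(),
//                     jvms->method()->name()->as_utf8());
//     }
//   }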

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  virtual uint           cmp( const Node &n ) const;
  virtual uint           size_of() const;       // Size is bigger

public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _jvms(jvms),
      _oop_map(NULL),
      _adr_type(adr_type)
  {
    init_class_id(Class_SafePoint);
  }

  OopMap*         _oop_map;   // Array of OopMap info (8-bit char) for GC
  JVMState* const _jvms;      // Pointer to list of JVM State objects
  const TypePtr*  _adr_type;  // What type of memory does this node produce?

  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.

  virtual JVMState* jvms() const { return _jvms; }
  void set_jvms(JVMState* s) {
    *(JVMState**)&_jvms = s;  // override const attribute in the accessor
  }
  OopMap *oop_map() const { return _oop_map; }
  void set_oop_map(OopMap *om) { _oop_map = om; }

  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void  set_local(JVMState* jvms, uint idx, Node *c);

  void  set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void  set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0)  grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor ();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control  ( Node *c ) { set_req(TypeFunc::Control,c); }
  void set_i_o      ( Node *c ) { set_req(TypeFunc::I_O    ,c); }
  void set_memory   ( Node *c ) { set_req(TypeFunc::Memory ,c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here.  (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode*         next_exception() const;
  void               set_next_exception(SafePointNode* n);
  bool                   has_exceptions() const { return next_exception() != NULL; }

  // Standard Node stuff
  virtual int            Opcode() const;
  virtual bool           pinned() const { return true; }
  virtual const Type    *Value( PhaseTransform *phase ) const;
  virtual const Type    *bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr *adr_type() const { return _adr_type; }
  virtual Node          *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node          *Identity( PhaseTransform *phase );
  virtual uint           ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  static  bool           needs_polling_address_input();

#ifndef PRODUCT
  virtual void              dump_spec(outputStream *st) const;
#endif
};
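
// A minimal sketch (names assumed for illustration) of reading interpreter
// state through a map and its innermost JVMState:
//
//   SafePointNode* map  = ...;             // e.g. the parser's current map
//   JVMState*      jvms = map->jvms();
//   Node* receiver = map->local(jvms, 0);  // local slot 0
//   Node* tos      = (jvms->sp() > 0) ? map->stack(jvms, jvms->sp() - 1)
//                                     : NULL;  // top of expression stack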

//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.

class SafePointScalarObjectNode: public TypeNode {
  uint _first_index; // First input edge index of a SafePoint node where
                     // states of the scalarized object fields are collected.
  uint _n_fields;    // Number of non-static fields of the scalarized object.
  DEBUG_ONLY(AllocateNode* _alloc;)
public:
  SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                            AllocateNode* alloc,
#endif
                            uint first_index, uint n_fields);
  virtual int Opcode() const;
  virtual uint           ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  uint first_index() const { return _first_index; }
  uint n_fields()    const { return _n_fields; }
  DEBUG_ONLY(AllocateNode* alloc() const { return _alloc; })

  // SafePointScalarObject should always be pinned to the control edge
  // of the SafePoint node for which it was generated.
  virtual bool pinned() const; // { return true; }

  // SafePointScalarObject depends on the SafePoint node
  // for which it was generated.
  virtual bool depends_only_on_test() const; // { return false; }

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s".  But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call".  Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(int jvms_adj, Dict* sosn_map) const;

#ifndef PRODUCT
  virtual void              dump_spec(outputStream *st) const;
#endif
};
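
// A hedged sketch of the clone protocol above, as used when a safepoint "s"
// is being replaced by "new_call" (local names are assumptions):
//
//   Dict* sosn_map = new Dict(cmpkey, hashkey);  // one map per (s, new_call) pair
//   SafePointScalarObjectNode* new_sosn = old_sosn->clone(jvms_adj, sosn_map);
//   // Cloning the same old node again returns the copy cached in sosn_map,
//   // so shared scalarized objects are not duplicated.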


// Simple container for the outgoing projections of a call.  Useful
// for serious surgery on calls.
class CallProjections : public StackObj {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* resproj;
  Node* exobj;
};
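
// A hedged usage sketch: a pass that rewires a call typically fills a
// CallProjections on the stack via CallNode::extract_projections (declared
// below) and then operates on the individual projections.  Local names here
// are assumptions:
//
//   CallProjections projs;
//   call->extract_projections(&projs, false /*separate_io_proj*/);
//   if (projs.fallthrough_catchproj != NULL) {
//     // normal (non-exceptional) control successor of the call
//   }
//   if (projs.catchall_catchproj != NULL) {
//     // exceptional control successor of the call
//   }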


//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
public:
  const TypeFunc *_tf;        // Function type
  address      _entry_point;  // Address of method being called
  float        _cnt;          // Estimate of number of times called

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
    : SafePointNode(tf->domain()->cnt(), NULL, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN)
  {
    init_class_id(Class_Call);
    init_flags(Flag_is_Call);
  }

  const TypeFunc* tf()        const { return _tf; }
  const address entry_point() const { return _entry_point; }
  const float   cnt()         const { return _cnt; }

  void set_tf(const TypeFunc* tf) { _tf = tf; }
  void set_entry_point(address p) { _entry_point = p; }
  void set_cnt(float c)           { _cnt = c; }

  virtual const Type *bottom_type() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase ) { return this; }
  virtual uint        cmp( const Node &n ) const;
  virtual uint        size_of() const = 0;
  virtual void        calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
  virtual Node       *match( const ProjNode *proj, const Matcher *m );
  virtual uint        ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool        guaranteed_safepoint()  { return true; }
  // For macro nodes, the JVMState gets modified during expansion, so when cloning
  // the node the JVMState must be cloned.
  virtual void        clone_jvms() { }   // default is not to clone

  // Returns true if the call may modify n
  virtual bool        may_modify(const TypePtr *addr_t, PhaseTransform *phase);
  // Does this node have a use of n other than in debug information?
  bool                has_non_debug_use(Node *n);
  // Returns the unique CheckCastPP of a call,
  // or the result projection if there are several CheckCastPPs,
  // or NULL if there is none.
  Node *result_cast();

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else.  Used by macro expansion
  // and the late inlining support.
  void extract_projections(CallProjections* projs, bool separate_io_proj);

  virtual uint match_edge(uint idx) const;

#ifndef PRODUCT
  virtual void        dump_req()  const;
  virtual void        dump_spec(outputStream *st) const;
#endif
};


//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention.  (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool    _optimized_virtual;
  bool    _method_handle_invoke;
  ciMethod* _method;            // Method being direct called
public:
  const int       _bci;         // Byte Code Index of call byte code
  CallJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _optimized_virtual(false),
      _method_handle_invoke(false),
      _method(method), _bci(bci)
  {
    init_class_id(Class_CallJava);
  }

  virtual int   Opcode() const;
  ciMethod* method() const                { return _method; }
  void  set_method(ciMethod *m)           { _method = m; }
  void  set_optimized_virtual(bool f)     { _optimized_virtual = f; }
  bool  is_optimized_virtual() const      { return _optimized_virtual; }
  void  set_method_handle_invoke(bool f)  { _method_handle_invoke = f; }
  bool  is_method_handle_invoke() const   { return _method_handle_invoke; }

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallJavaNode(tf, addr, method, bci), _name(NULL) {
    init_class_id(Class_CallStaticJava);
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
                     const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL, bci), _name(name) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
  }
  const char *_name;            // Runtime wrapper name

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);

  virtual int         Opcode() const;
#ifndef PRODUCT
  virtual void        dump_spec(outputStream *st) const;
#endif
};
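
// Uncommon traps are emitted as CallStaticJavaNodes to the uncommon-trap
// blob, with the trap request encoded in the call.  A hedged sketch of
// testing a call site for one (local names are assumptions):
//
//   int req = call->uncommon_trap_request();
//   if (req != 0) {
//     // decode with Deoptimization::trap_request_reason(req) and
//     // Deoptimization::trap_request_action(req)
//   }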

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode( const TypeFunc *tf, address addr, ciMethod* method, int vtable_index, int bci ) : CallJavaNode(tf,addr,method,bci), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  int _vtable_index;
  virtual int   Opcode() const;
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type)
    : CallNode(tf, addr, adr_type),
      _name(name)
  {
    init_class_id(Class_CallRuntime);
  }

  const char *_name;            // Printable name, if _method is NULL
  virtual int   Opcode() const;
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int   Opcode() const;
  virtual bool        guaranteed_safepoint()  { return false; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
  }
  virtual int   Opcode() const;
};


//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
//  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
//  get expanded into a code sequence containing a call.  Unlike other CallNodes,
//  they have 2 memory projections and 2 i_o projections (which are distinguished by
//  the _is_io_use flag in the projection.)  This is needed when expanding the node in
//  order to differentiate the uses of the projection on the normal control path from
//  those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
    // Inputs:
    AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
    KlassNode,                        // type (maybe dynamic) of the obj.
    InitialTest,                      // slow-path test (may be constant)
    ALength,                          // array length (or TOP if none)
    ParmLimit
  };

  static const TypeFunc* alloc_type() {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]   = TypeInt::POS;
    fields[KlassNode]   = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength]     = TypeInt::INT;  // length (can be a bad length)

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }

  bool _is_scalar_replaceable;  // Result of Escape Analysis

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void  clone_jvms() {
    set_jvms(jvms()->clone_deep(Compile::current()));
  }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool        guaranteed_safepoint()  { return false; }

  // allocations do not modify their arguments
  virtual bool        may_modify(const TypePtr *addr_t, PhaseTransform *phase) { return false; }

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note:  This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note:  AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(KlassNode);
  }

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);
};
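
// A minimal sketch of the pattern-match helpers above (local names are
// assumptions): given a pointer 'ptr' that may flow from an allocation,
// recover the AllocateNode and its klass input:
//
//   AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr, phase);
//   if (alloc != NULL) {
//     Node* klass_node = alloc->in(AllocateNode::KlassNode);
//     // ... e.g. consult the init barrier via alloc->initialization() ...
//   }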

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                    Node* size, Node* klass_node, Node* initial_test,
                    Node* count_val
                    )
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength,        count_val);
  }
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }
};
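
// A hedged sketch of recovering an array allocation and its length input
// (local names are assumptions):
//
//   AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr, phase);
//   if (alloc != NULL) {
//     Node* len = alloc->Ideal_length();  // raw ALength input; may still need
//                                         // narrowing via make_ideal_length()
//   }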

//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  bool _eliminate;    // indicates this lock can be safely eliminated
  bool _coarsened;    // indicates this lock was coarsened
#ifndef PRODUCT
  NamedCounter* _counter;
#endif

protected:
  // helper functions for lock elimination
  //

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);


public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
      _eliminate(false),
      _coarsened(false)
  {
#ifndef PRODUCT
    _counter = NULL;
#endif
  }
  virtual int Opcode() const = 0;
  Node *   obj_node() const       { return in(TypeFunc::Parms + 0); }
  Node *   box_node() const       { return in(TypeFunc::Parms + 1); }
  Node *   fastlock_node() const  { return in(TypeFunc::Parms + 2); }
  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC; }

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()         { return _eliminate; }
  // mark node as eliminated and update the counter if there is one
  void set_eliminated();

  bool is_coarsened()  { return _coarsened; }
  void set_coarsened() { _coarsened = true; }

  // locking does not modify its arguments
  virtual bool        may_modify(const TypePtr *addr_t, PhaseTransform *phase) { return false; }

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
#endif
};

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call.  This node takes 3 "parameters":
//    0 - object to lock
//    1 - a BoxLockNode
//    2 - a FastLockNode
//
class LockNode : public AbstractLockNode {
public:

  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;         // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);

    return TypeFunc::make(domain,range);
  }
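
  // A hedged construction sketch (context and variable names are assumptions;
  // node allocation and GVN registration are elided):
  //
  //   LockNode* lock = ... new LockNode allocated in the Compile arena ...
  //   lock->init_req(TypeFunc::Parms + 0, obj);   // object to lock
  //   lock->init_req(TypeFunc::Parms + 1, box);   // BoxLockNode
  //   lock->init_req(TypeFunc::Parms + 2, flock); // FastLockNode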

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool        guaranteed_safepoint()  { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void  clone_jvms() {
    set_jvms(jvms()->clone_deep(Compile::current()));
  }
};

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool        guaranteed_safepoint()  { return false; }
};