/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_CALLNODE_HPP
#define SHARE_VM_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class Chaitin;
class NamedCounter;
class MultiNode;
class  SafePointNode;
class   CallNode;
class     CallJavaNode;
class       CallStaticJavaNode;
class       CallDynamicJavaNode;
class     CallRuntimeNode;
class       CallLeafNode;
class         CallLeafNoFPNode;
class     AllocateNode;
class       AllocateArrayNode;
class     BoxLockNode;
class     LockNode;
class     UnlockNode;
class JVMState;
class OopMap;
class State;
class StartNode;
class MachCallNode;
class FastLockNode;
//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; }
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int   Opcode() const;
  static  const TypeTuple *osr_domain();
};


//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site.  Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode.  Restores registers and
// unwinds stack.  Rethrow happens in the caller's method.
class RethrowNode : public Node {
 public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};
//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit.  (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
class JVMState : public ResourceObj {
  friend class VMStructs;
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false       -- do not reexecute
    Reexecute_True      =  1  // true        -- reexecute the bytecode
  } ReexecuteState; // Reexecute state

private:
  JVMState*         _caller;    // List pointer for forming scope chains
  uint              _depth;     // One more than caller depth, or one.
  uint              _locoff;    // Offset to locals in input edge mapping
  uint              _stkoff;    // Offset to stack in input edge mapping
  uint              _monoff;    // Offset to monitors in input edge mapping
  uint              _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint              _endoff;    // Offset to end of input edge mapping
  uint              _sp;        // Java Expression Stack Pointer for this state
  int               _bci;       // Byte Code Index of this JVM point
  ReexecuteState    _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*         _method;    // Method Pointer
  SafePointNode*    _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size);  // root state; has a null method

  // Access functions for the JVM
  // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
  //       \ locoff    \ stkoff    \ argoff    \ monoff    \ scloff    \ endoff
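  //
  // Worked example (hypothetical numbers, not taken from any real compile):
  // for a root JVMState describing a frame with 4 locals, a maximum stack of
  // 6, sp() == 2, one monitor, and no scalarized objects, the debug edges of
  // the map could be laid out as:
  //   locoff() ==  5   (first edge after the fixed TypeFunc::Parms inputs)
  //   stkoff() ==  9   (locoff() + 4 locals)
  //   argoff() == 11   (stkoff() + sp(); top of the live expression stack)
  //   monoff() == 15   (stkoff() + 6 max stack slots)
  //   scloff() == 17   (monoff() + 2 edges for the single (box, obj) pair)
  //   endoff() == 17   (no scalar-replaced fields follow)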
  uint              locoff() const { return _locoff; }
  uint              stkoff() const { return _stkoff; }
  uint              argoff() const { return _stkoff + _sp; }
  uint              monoff() const { return _monoff; }
  uint              scloff() const { return _scloff; }
  uint              endoff() const { return _endoff; }
  uint              oopoff() const { return debug_end(); }

  int            loc_size() const { return stkoff() - locoff(); }
  int            stk_size() const { return monoff() - stkoff(); }
  int            mon_size() const { return scloff() - monoff(); }
  int            scl_size() const { return endoff() - scloff(); }

  bool        is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
  bool        is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
  bool        is_mon(uint i) const { return monoff() <= i && i < scloff(); }
  bool        is_scl(uint i) const { return scloff() <= i && i < endoff(); }

  uint                      sp() const { return _sp; }
  int                      bci() const { return _bci; }
  bool        should_reexecute() const { return _reexecute==Reexecute_True; }
  bool  is_reexecute_undefined() const { return _reexecute==Reexecute_Undefined; }
  bool              has_method() const { return _method != NULL; }
  ciMethod*             method() const { assert(has_method(), ""); return _method; }
  JVMState*             caller() const { return _caller; }
  SafePointNode*           map() const { return _map; }
  uint                   depth() const { return _depth; }
  uint             debug_start() const; // returns locoff of root caller
  uint               debug_end() const; // returns endoff of self
  uint              debug_size() const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint        debug_depth()  const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors are stored as (boxNode, objNode) pairs
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                             || (caller() && caller()->is_monitor_use(off)); }
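
  // Illustration (hypothetical offsets): with monoff() == 15, monitor 0
  // occupies edges 15 (box) and 16 (obj), and monitor 1 occupies 17 and 18.
  // is_monitor_box(15) is true because bit 0 of (15 - monoff()) is clear;
  // is_monitor_box(16) is false, making edge 16 the object half of the pair.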

  // Initialization functions for the JVM
  void              set_locoff(uint off) { _locoff = off; }
  void              set_stkoff(uint off) { _stkoff = off; }
  void              set_monoff(uint off) { _monoff = off; }
  void              set_scloff(uint off) { _scloff = off; }
  void              set_endoff(uint off) { _endoff = off; }
  void              set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void              set_map(SafePointNode *map) { _map = map; }
  void              set_sp(uint sp) { _sp = sp; }
                    // _reexecute is initialized to "undefined" for a new bci
  void              set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
  void              set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
  void      set_map_deep(SafePointNode *map);// reset map for all callers

#ifndef PRODUCT
  void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void      dump_spec(outputStream *st) const;
  void      dump_on(outputStream* st) const;
  void      dump() const {
    dump_on(tty);
  }
#endif
};

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  virtual uint           cmp( const Node &n ) const;
  virtual uint           size_of() const;       // Size is bigger

public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _oop_map(NULL),
      _jvms(jvms),
      _adr_type(adr_type)
  {
    init_class_id(Class_SafePoint);
  }

  OopMap*         _oop_map;   // Array of OopMap info (8-bit char) for GC
  JVMState* const _jvms;      // Pointer to list of JVM State objects
  const TypePtr*  _adr_type;  // What type of memory does this node produce?

  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.
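  //
  // For example (an assumption about typical usage, not a rule enforced
  // here): a leaf call into a runtime stub that only writes raw memory may
  // advertise TypeRawPtr::BOTTOM as its adr_type, while a full Java call
  // uses TypePtr::BOTTOM to indicate it may store to any memory slice.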

  virtual JVMState* jvms() const { return _jvms; }
  void set_jvms(JVMState* s) {
    *(JVMState**)&_jvms = s;  // override const attribute in the accessor
  }
  OopMap *oop_map() const { return _oop_map; }
  void set_oop_map(OopMap *om) { _oop_map = om; }

 private:
  void verify_input(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    Node* n = in(idx);
    assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
           in(idx + 1)->is_top(), "2nd half of long/double");
  }

 public:
  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->locoff() + idx);
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->stkoff() + idx);
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->argoff() + idx);
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void  set_local(JVMState* jvms, uint idx, Node *c);

  void  set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void  set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0)  grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor ();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control  ( Node *c ) { set_req(TypeFunc::Control,c); }
  void set_i_o      ( Node *c ) { set_req(TypeFunc::I_O    ,c); }
  void set_memory   ( Node *c ) { set_req(TypeFunc::Memory ,c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here.  (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode*         next_exception() const;
  void               set_next_exception(SafePointNode* n);
  bool                   has_exceptions() const { return next_exception() != NULL; }

  // Standard Node stuff
  virtual int            Opcode() const;
  virtual bool           pinned() const { return true; }
  virtual const Type    *Value( PhaseTransform *phase ) const;
  virtual const Type    *bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr *adr_type() const { return _adr_type; }
  virtual Node          *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node          *Identity( PhaseTransform *phase );
  virtual uint           ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  static  bool           needs_polling_address_input();

#ifndef PRODUCT
  virtual void           dump_spec(outputStream *st) const;
#endif
};

//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.

class SafePointScalarObjectNode: public TypeNode {
  uint _first_index; // First input edge relative index of a SafePoint node where
                     // states of the scalarized object fields are collected.
                     // It is relative to the last (youngest) jvms->_scloff.
  uint _n_fields;    // Number of non-static fields of the scalarized object.
  DEBUG_ONLY(AllocateNode* _alloc;)

  virtual uint hash() const; // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const;

  uint first_index() const { return _first_index; }

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                            AllocateNode* alloc,
#endif
                            uint first_index, uint n_fields);
  virtual int Opcode() const;
  virtual uint           ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  uint first_index(JVMState* jvms) const {
    assert(jvms != NULL, "missed JVMS");
    return jvms->scloff() + _first_index;
  }
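  // (Example, hypothetical numbers: if the youngest jvms->scloff() is 17 and
  //  _first_index is 2, the collected field values of this scalarized object
  //  begin at input edge 19 of the owning SafePoint node.)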
  uint n_fields()    const { return _n_fields; }

#ifdef ASSERT
  AllocateNode* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s".  But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call".  Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(Dict* sosn_map) const;

#ifndef PRODUCT
  virtual void              dump_spec(outputStream *st) const;
#endif
};


// Simple container for the outgoing projections of a call.  Useful
// for serious surgery on calls.
class CallProjections : public StackObj {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* resproj;
  Node* exobj;
};

class CallGenerator;

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
  friend class VMStructs;
public:
  const TypeFunc *_tf;        // Function type
  address      _entry_point;  // Address of method being called
  float        _cnt;          // Estimate of number of times called
  CallGenerator* _generator;  // corresponding CallGenerator for some late inline calls

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
    : SafePointNode(tf->domain()->cnt(), NULL, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(NULL)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()         const { return _tf; }
  const address  entry_point() const { return _entry_point; }
  const float    cnt()         const { return _cnt; }
  CallGenerator* generator()   const { return _generator; }

  void set_tf(const TypeFunc* tf)       { _tf = tf; }
  void set_entry_point(address p)       { _entry_point = p; }
  void set_cnt(float c)                 { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type *bottom_type() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node *Identity( PhaseTransform *phase ) { return this; }
  virtual uint        cmp( const Node &n ) const;
  virtual uint        size_of() const = 0;
  virtual void        calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
  virtual Node       *match( const ProjNode *proj, const Matcher *m );
  virtual uint        ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool        guaranteed_safepoint()  { return true; }
  // For macro nodes, the JVMState gets modified during expansion, so when cloning
  // the node the JVMState must be cloned.
  virtual void        clone_jvms(Compile* C) { }   // default is not to clone

  // Returns true if the call may modify n
  virtual bool        may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase);
  // Does this node have a use of n other than in debug information?
  bool                has_non_debug_use(Node *n);
  // Returns the unique CheckCastPP of a call,
  // or its result projection if there are several CheckCastPPs,
  // or NULL if there is none.
  Node *result_cast();
  // Does this node return a pointer?
  bool returns_pointer() const {
    const TypeTuple *r = tf()->range();
    return (r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else.  Used by macro expansion
  // and the late inlining support.
  void extract_projections(CallProjections* projs, bool separate_io_proj);
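
  // A minimal usage sketch (assumes a CallNode* "call" obtained elsewhere):
  //
  //   CallProjections projs;
  //   call->extract_projections(&projs, true /*separate_io_proj*/);
  //   // projs.fallthrough_memproj, projs.resproj, etc. now name the
  //   // interesting out-edges, so the call can be rewired or replaced.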

  virtual uint match_edge(uint idx) const;

#ifndef PRODUCT
  virtual void        dump_req(outputStream *st = tty) const;
  virtual void        dump_spec(outputStream *st) const;
#endif
};


//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention.  (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
  friend class VMStructs;
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool    _optimized_virtual;
  bool    _method_handle_invoke;
  ciMethod* _method;            // Method being direct called
public:
  const int       _bci;         // Byte Code Index of call byte code
  CallJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _optimized_virtual(false),
      _method_handle_invoke(false),
      _method(method), _bci(bci)
  {
    init_class_id(Class_CallJava);
  }

  virtual int   Opcode() const;
  ciMethod* method() const                { return _method; }
  void  set_method(ciMethod *m)           { _method = m; }
  void  set_optimized_virtual(bool f)     { _optimized_virtual = f; }
  bool  is_optimized_virtual() const      { return _optimized_virtual; }
  void  set_method_handle_invoke(bool f)  { _method_handle_invoke = f; }
  bool  is_method_handle_invoke() const   { return _method_handle_invoke; }

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallJavaNode(tf, addr, method, bci), _name(NULL) {
    init_class_id(Class_CallStaticJava);
    if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
                     const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL, bci), _name(name) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
  }
  const char *_name;      // Runtime wrapper name

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);

  bool is_boxing_method() const {
    return is_macro() && (method() != NULL) && method()->is_boxing_method();
  }
  // Later inlining modifies the JVMState, so we need to clone it
  // when the call node is cloned (because it is a macro node).
  virtual void  clone_jvms(Compile* C) {
    if ((jvms() != NULL) && is_boxing_method()) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  virtual int         Opcode() const;
#ifndef PRODUCT
  virtual void        dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int vtable_index, int bci)
    : CallJavaNode(tf, addr, method, bci), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  int _vtable_index;
  virtual int   Opcode() const;
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type)
    : CallNode(tf, addr, adr_type),
      _name(name)
  {
    init_class_id(Class_CallRuntime);
  }

  const char *_name;            // Printable name, if _method is NULL
  virtual int   Opcode() const;
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int   Opcode() const;
  virtual bool        guaranteed_safepoint()  { return false; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
  }
  virtual int   Opcode() const;
};


//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
//  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
//  get expanded into a code sequence containing a call.  Unlike other CallNodes,
//  they have 2 memory projections and 2 i_o projections (which are distinguished by
//  the _is_io_use flag in the projection.)  This is needed when expanding the node in
//  order to differentiate the uses of the projection on the normal control path from
//  those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
    // Inputs:
    AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
    KlassNode,                        // type (maybe dynamic) of the obj.
    InitialTest,                      // slow-path test (may be constant)
    ALength,                          // array length (or TOP if none)
    ParmLimit
  };

  static const TypeFunc* alloc_type(const Type* t) {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]   = TypeInt::POS;
    fields[KlassNode]   = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength]     = t;  // length (can be a bad length)

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void  clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool        guaranteed_safepoint()  { return false; }

  // allocations do not modify their arguments
  virtual bool        may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false; }

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note:  This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note:  AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);
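
  // Hedged usage sketch (assumes "ptr" and "phase" are supplied by the caller):
  //
  //   intptr_t offset = 0;
  //   AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr, phase, offset);
  //   if (alloc != NULL) {
  //     // "ptr" addresses "offset" bytes into the newly allocated object.
  //   }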

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(KlassNode);
  }

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                    Node* size, Node* klass_node, Node* initial_test,
                    Node* count_val
                    )
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength,        count_val);
  }
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }
};

//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  enum {
    Regular = 0,  // Normal lock
    NonEscObj,    // Lock is used for a non-escaping object
    Coarsened,    // Lock was coarsened
    Nested        // Nested lock
  } _kind;
#ifndef PRODUCT
  NamedCounter* _counter;
#endif

protected:
  // helper functions for lock elimination
  //

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

  // Update the counter to indicate that this lock was eliminated.
  void set_eliminated_lock_counter() PRODUCT_RETURN;

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
      _kind(Regular)
  {
#ifndef PRODUCT
    _counter = NULL;
#endif
  }
  virtual int Opcode() const = 0;
  Node *   obj_node() const       { return in(TypeFunc::Parms + 0); }
  Node *   box_node() const       { return in(TypeFunc::Parms + 1); }
  Node *   fastlock_node() const  { return in(TypeFunc::Parms + 2); }
  void     set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC; }

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()  const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened()   const { return (_kind == Coarsened); }
  bool is_nested()      const { return (_kind == Nested); }

  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested; set_eliminated_lock_counter(); }

  // locking does not modify its arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false; }

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
#endif
};

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call.  This node takes 3 "parameters":
//    0 - object to lock
//    1 - a BoxLockNode
//    2 - a FastLockNode
//
class LockNode : public AbstractLockNode {
public:

  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;         // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);

    return TypeFunc::make(domain,range);
  }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool        guaranteed_safepoint()  { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void  clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  bool is_nested_lock_region(); // Is this Lock nested?
};

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool        guaranteed_safepoint()  { return false; }
};

#endif // SHARE_VM_OPTO_CALLNODE_HPP