src/share/vm/opto/callnode.hpp

Old version:

 161 // Pop stack frame and jump indirect
 162 class TailJumpNode : public ReturnNode {
 163 public:
 164   TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
 165     : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
 166     init_req(TypeFunc::Parms, target);
 167     init_req(TypeFunc::Parms+1, ex_oop);
 168   }
 169 
 170   virtual int Opcode() const;
 171   virtual uint match_edge(uint idx) const;
 172 };
 173 
 174 //-------------------------------JVMState-------------------------------------
 175 // A linked list of JVMState nodes captures the whole interpreter state,
 176 // plus GC roots, for all active calls at some call site in this compilation
 177 // unit.  (If there is no inlining, then the list has exactly one link.)
 178 // This provides a way to map the optimized program back into the interpreter,
 179 // or to let the GC mark the stack.
 180 class JVMState : public ResourceObj {







 181 private:
 182   JVMState*         _caller;    // List pointer for forming scope chains
 183   uint              _depth;     // One more than caller depth, or one.
 184   uint              _locoff;    // Offset to locals in input edge mapping
 185   uint              _stkoff;    // Offset to stack in input edge mapping
 186   uint              _monoff;    // Offset to monitors in input edge mapping
 187   uint              _scloff;    // Offset to fields of scalar objs in input edge mapping
 188   uint              _endoff;    // Offset to end of input edge mapping
 189   uint              _sp;        // Java Expression Stack Pointer for this state
 190   int               _bci;       // Byte Code Index of this JVM point

 191   ciMethod*         _method;    // Method Pointer
 192   SafePointNode*    _map;       // Map node associated with this scope
 193 public:
 194   friend class Compile;

 195 
 196   // Because JVMState objects live over the entire lifetime of the
 197   // Compile object, they are allocated into the comp_arena, which
 198   // does not get resource marked or reset during the compile process
 199   void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); }
 200   void operator delete( void * ) { } // fast deallocation
 201 
 202   // Create a new JVMState, ready for abstract interpretation.
 203   JVMState(ciMethod* method, JVMState* caller);
 204   JVMState(int stack_size);  // root state; has a null method
 205 
 206   // Access functions for the JVM
 207   uint              locoff() const { return _locoff; }
 208   uint              stkoff() const { return _stkoff; }
 209   uint              argoff() const { return _stkoff + _sp; }
 210   uint              monoff() const { return _monoff; }
 211   uint              scloff() const { return _scloff; }
 212   uint              endoff() const { return _endoff; }
 213   uint              oopoff() const { return debug_end(); }
 214 
 215   int            loc_size() const { return _stkoff - _locoff; }
 216   int            stk_size() const { return _monoff - _stkoff; }
 217   int            mon_size() const { return _scloff - _monoff; }
 218   int            scl_size() const { return _endoff - _scloff; }
 219 
 220   bool        is_loc(uint i) const { return i >= _locoff && i < _stkoff; }
 221   bool        is_stk(uint i) const { return i >= _stkoff && i < _monoff; }
 222   bool        is_mon(uint i) const { return i >= _monoff && i < _scloff; }
 223   bool        is_scl(uint i) const { return i >= _scloff && i < _endoff; }
 224 
 225   uint              sp()     const { return _sp; }
 226   int               bci()    const { return _bci; }


 227   bool          has_method() const { return _method != NULL; }
 228   ciMethod*         method() const { assert(has_method(), ""); return _method; }
 229   JVMState*         caller() const { return _caller; }
 230   SafePointNode*    map()    const { return _map; }
 231   uint              depth()  const { return _depth; }
 232   uint        debug_start()  const; // returns locoff of root caller
 233   uint        debug_end()    const; // returns endoff of self
 234   uint        debug_size()   const {
 235     return loc_size() + sp() + mon_size() + scl_size();
 236   }
 237   uint        debug_depth()  const; // returns sum of debug_size values at all depths
 238 
 239   // Returns the JVM state at the desired depth (1 == root).
 240   JVMState* of_depth(int d) const;
 241 
 242   // Tells if two JVM states have the same call chain (depth, methods, & bcis).
 243   bool same_calls_as(const JVMState* that) const;
 244 
 245   // Monitors (monitors are stored as (boxNode, objNode) pairs)
 246   enum { logMonitorEdges = 1 };


 250   int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
 251   bool is_monitor_box(uint off)    const {
 252     assert(is_mon(off), "should be called only for monitor edge");
 253     return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
 254   }
 255   bool is_monitor_use(uint off)    const { return (is_mon(off)
 256                                                    && is_monitor_box(off))
 257                                              || (caller() && caller()->is_monitor_use(off)); }
 258 
 259   // Initialization functions for the JVM
 260   void              set_locoff(uint off) { _locoff = off; }
 261   void              set_stkoff(uint off) { _stkoff = off; }
 262   void              set_monoff(uint off) { _monoff = off; }
 263   void              set_scloff(uint off) { _scloff = off; }
 264   void              set_endoff(uint off) { _endoff = off; }
 265   void              set_offsets(uint off) {
 266     _locoff = _stkoff = _monoff = _scloff = _endoff = off;
 267   }
 268   void              set_map(SafePointNode *map) { _map = map; }
 269   void              set_sp(uint sp) { _sp = sp; }
 270   void              set_bci(int bci) { _bci = bci; }



 271 
 272   // Miscellaneous utility functions
 273   JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
 274   JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
 275 
 276 #ifndef PRODUCT
 277   void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
 278   void      dump_spec(outputStream *st) const;
 279   void      dump_on(outputStream* st) const;
 280   void      dump() const {
 281     dump_on(tty);
 282   }
 283 #endif
 284 };
 285 
 286 //------------------------------SafePointNode----------------------------------
 287 // A SafePointNode is a subclass of a MultiNode for convenience (and
 288 // potential code sharing) only - conceptually it is independent of
 289 // the Node semantics.
 290 class SafePointNode : public MultiNode {

New version:

 161 // Pop stack frame and jump indirect
 162 class TailJumpNode : public ReturnNode {
 163 public:
 164   TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
 165     : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
 166     init_req(TypeFunc::Parms, target);
 167     init_req(TypeFunc::Parms+1, ex_oop);
 168   }
 169 
 170   virtual int Opcode() const;
 171   virtual uint match_edge(uint idx) const;
 172 };
 173 
 174 //-------------------------------JVMState-------------------------------------
 175 // A linked list of JVMState nodes captures the whole interpreter state,
 176 // plus GC roots, for all active calls at some call site in this compilation
 177 // unit.  (If there is no inlining, then the list has exactly one link.)
 178 // This provides a way to map the optimized program back into the interpreter,
 179 // or to let the GC mark the stack.
 180 class JVMState : public ResourceObj {
 181 public:
 182   typedef enum {
 183     Reexecute_Undefined = -1, // not defined -- will be translated into false later
 184     Reexecute_False     =  0, // false       -- do not reexecute
 185     Reexecute_True      =  1  // true        -- reexecute the bytecode
 186   } ReexecuteState; //Reexecute State
 187 
 188 private:
 189   JVMState*         _caller;    // List pointer for forming scope chains
 190   uint              _depth;     // One more than caller depth, or one.
 191   uint              _locoff;    // Offset to locals in input edge mapping
 192   uint              _stkoff;    // Offset to stack in input edge mapping
 193   uint              _monoff;    // Offset to monitors in input edge mapping
 194   uint              _scloff;    // Offset to fields of scalar objs in input edge mapping
 195   uint              _endoff;    // Offset to end of input edge mapping
 196   uint              _sp;        // Java Expression Stack Pointer for this state
 197   int               _bci;       // Byte Code Index of this JVM point
 198   ReexecuteState    _reexecute; // Whether this bytecode needs to be re-executed
 199   ciMethod*         _method;    // Method Pointer
 200   SafePointNode*    _map;       // Map node associated with this scope
 201 public:
 202   friend class Compile;
 203   friend class PreserveReexecuteState;
 204 
 205   // Because JVMState objects live over the entire lifetime of the
 206   // Compile object, they are allocated into the comp_arena, which
 207   // does not get resource marked or reset during the compile process
 208   void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); }
 209   void operator delete( void * ) { } // fast deallocation
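       // Illustrative usage sketch (not part of this header; the names are
       // hypothetical): the placement operator new above allocates JVMState
       // objects straight into the compile-lifetime arena, e.g.
       //
       //   Compile* C = Compile::current();
       //   JVMState* callee_jvms = new (C) JVMState(callee_method, caller_jvms);
       //
       // No matching free is needed; the storage goes away with comp_arena.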
 210 
 211   // Create a new JVMState, ready for abstract interpretation.
 212   JVMState(ciMethod* method, JVMState* caller);
 213   JVMState(int stack_size);  // root state; has a null method
 214 
 215   // Access functions for the JVM
 216   uint              locoff() const { return _locoff; }
 217   uint              stkoff() const { return _stkoff; }
 218   uint              argoff() const { return _stkoff + _sp; }
 219   uint              monoff() const { return _monoff; }
 220   uint              scloff() const { return _scloff; }
 221   uint              endoff() const { return _endoff; }
 222   uint              oopoff() const { return debug_end(); }
 223 
 224   int            loc_size() const { return _stkoff - _locoff; }
 225   int            stk_size() const { return _monoff - _stkoff; }
 226   int            mon_size() const { return _scloff - _monoff; }
 227   int            scl_size() const { return _endoff - _scloff; }
 228 
 229   bool        is_loc(uint i) const { return i >= _locoff && i < _stkoff; }
 230   bool        is_stk(uint i) const { return i >= _stkoff && i < _monoff; }
 231   bool        is_mon(uint i) const { return i >= _monoff && i < _scloff; }
 232   bool        is_scl(uint i) const { return i >= _scloff && i < _endoff; }
 233 
 234   uint                      sp() const { return _sp; }
 235   int                      bci() const { return _bci; }
 236   bool        should_reexecute() const { return _reexecute==Reexecute_True; }
 237   bool  is_reexecute_undefined() const { return _reexecute==Reexecute_Undefined; }
 238   bool              has_method() const { return _method != NULL; }
 239   ciMethod*             method() const { assert(has_method(), ""); return _method; }
 240   JVMState*             caller() const { return _caller; }
 241   SafePointNode*           map() const { return _map; }
 242   uint                   depth() const { return _depth; }
 243   uint             debug_start() const; // returns locoff of root caller
 244   uint               debug_end() const; // returns endoff of self
 245   uint              debug_size() const {
 246     return loc_size() + sp() + mon_size() + scl_size();
 247   }
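       // Worked example with hypothetical offsets: if locoff=4, stkoff=9,
       // monoff=13, scloff=15, endoff=15 and sp=2, then debug_size() is
       // 5 + 2 + 2 + 0 = 9 edges.  Note that the live stack depth sp() is
       // summed rather than the full stk_size().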
 248   uint        debug_depth()  const; // returns sum of debug_size values at all depths
 249 
 250   // Returns the JVM state at the desired depth (1 == root).
 251   JVMState* of_depth(int d) const;
 252 
 253   // Tells if two JVM states have the same call chain (depth, methods, & bcis).
 254   bool same_calls_as(const JVMState* that) const;
 255 
 256   // Monitors (monitors are stored as (boxNode, objNode) pairs)
 257   enum { logMonitorEdges = 1 };


 261   int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
 262   bool is_monitor_box(uint off)    const {
 263     assert(is_mon(off), "should be called only for monitor edge");
 264     return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
 265   }
 266   bool is_monitor_use(uint off)    const { return (is_mon(off)
 267                                                    && is_monitor_box(off))
 268                                              || (caller() && caller()->is_monitor_use(off)); }
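       // Edge-layout sketch (hypothetical numbers): with logMonitorEdges = 1,
       // each monitor occupies two consecutive input edges starting at
       // monoff().  For monoff() == 13:
       //   edge 13 = box of monitor 0,  edge 14 = obj of monitor 0  (= monitor_obj_offset(0))
       //   edge 15 = box of monitor 1,  edge 16 = obj of monitor 1  (= monitor_obj_offset(1))
       // is_monitor_box() is true exactly for the even offsets relative to monoff().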
 269 
 270   // Initialization functions for the JVM
 271   void              set_locoff(uint off) { _locoff = off; }
 272   void              set_stkoff(uint off) { _stkoff = off; }
 273   void              set_monoff(uint off) { _monoff = off; }
 274   void              set_scloff(uint off) { _scloff = off; }
 275   void              set_endoff(uint off) { _endoff = off; }
 276   void              set_offsets(uint off) {
 277     _locoff = _stkoff = _monoff = _scloff = _endoff = off;
 278   }
 279   void              set_map(SafePointNode *map) { _map = map; }
 280   void              set_sp(uint sp) { _sp = sp; }
 281   // Note: _reexecute should always be undefined when a new _bci is set
 282   void              set_bci(int bci) {assert(_reexecute==Reexecute_Undefined || _bci==bci, "sanity check"); _bci = bci; }
 283   void              set_should_reexecute(bool reexec) {_reexecute = reexec ? Reexecute_True : Reexecute_False;}
 284   void              set_reexecute_undefined() {_reexecute = Reexecute_Undefined; }
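       // Call-order sketch (illustrative; jvms and next_bci are hypothetical):
       // set_bci() asserts that _reexecute is still undefined (or the bci is
       // unchanged), so callers move to a new bytecode before fixing the
       // reexecute decision:
       //
       //   jvms->set_reexecute_undefined();
       //   jvms->set_bci(next_bci);
       //   jvms->set_should_reexecute(true);   // or false, once decided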
 285 
 286   // Miscellaneous utility functions
 287   JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
 288   JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
 289 
 290 #ifndef PRODUCT
 291   void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
 292   void      dump_spec(outputStream *st) const;
 293   void      dump_on(outputStream* st) const;
 294   void      dump() const {
 295     dump_on(tty);
 296   }
 297 #endif
 298 };
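     // Illustrative sketch (not part of this header; print_scope_chain is a
     // hypothetical helper): the _caller links described above form the inline
     // scope chain, so every active call at a site can be visited by walking
     // outwards from the innermost JVMState, using only accessors declared above.
     static void print_scope_chain(const JVMState* jvms) {
       for (const JVMState* s = jvms; s != NULL; s = s->caller()) {
         // one line per inlining level, innermost first
         tty->print_cr("depth %u, bci %d, reexecute %d",
                       s->depth(), s->bci(), (int) s->should_reexecute());
       }
     }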
 299 
 300 //------------------------------SafePointNode----------------------------------
 301 // A SafePointNode is a subclass of a MultiNode for convenience (and
 302 // potential code sharing) only - conceptually it is independent of
 303 // the Node semantics.
 304 class SafePointNode : public MultiNode {

