/*
 * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class Chaitin;
class NamedCounter;
class MultiNode;
class SafePointNode;
class CallNode;
class CallJavaNode;
class CallStaticJavaNode;
class CallDynamicJavaNode;
class CallRuntimeNode;
class CallLeafNode;
class CallLeafNoFPNode;
class AllocateNode;
class AllocateArrayNode;
class LockNode;
class UnlockNode;
class JVMState;
class OopMap;
class State;
class StartNode;
class MachCallNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_flags(Flag_is_block_start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; };
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int Opcode() const;
  static const TypeTuple *osr_domain();
};


//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req() const;
#endif
};


//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site. Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode. Restores registers and
// unwinds stack. Rethrow happens in the caller's method.
class RethrowNode : public Node {
public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req() const;
#endif
};


//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit. (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
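//
// An illustrative sketch (not part of the original interface) of walking
// such a list from the innermost state out to the root caller; 'youngest'
// is a hypothetical JVMState*:
//
//   for (const JVMState* j = youngest; j != NULL; j = j->caller()) {
//     // each link is one (possibly inlined) frame; for links that have a
//     // method, j->method() and j->bci() give the bytecode position
//   }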
class JVMState : public ResourceObj {
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false -- do not reexecute
    Reexecute_True      =  1  // true -- reexecute the bytecode
  } ReexecuteState; // Reexecute State

private:
  JVMState*      _caller;    // List pointer for forming scope chains
  uint           _depth;     // One more than caller depth, or one.
  uint           _locoff;    // Offset to locals in input edge mapping
  uint           _stkoff;    // Offset to stack in input edge mapping
  uint           _monoff;    // Offset to monitors in input edge mapping
  uint           _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint           _endoff;    // Offset to end of input edge mapping
  uint           _sp;        // Java Expression Stack Pointer for this state
  int            _bci;       // Byte Code Index of this JVM point
  ReexecuteState _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*      _method;    // Method Pointer
  SafePointNode* _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size); // root state; has a null method

  // Access functions for the JVM
  uint locoff() const { return _locoff; }
  uint stkoff() const { return _stkoff; }
  uint argoff() const { return _stkoff + _sp; }
  uint monoff() const { return _monoff; }
  uint scloff() const { return _scloff; }
  uint endoff() const { return _endoff; }
  uint oopoff() const { return debug_end(); }

  int loc_size() const { return _stkoff - _locoff; }
  int stk_size() const { return _monoff - _stkoff; }
  int mon_size() const { return _scloff - _monoff; }
  int scl_size() const { return _endoff - _scloff; }

  bool is_loc(uint i) const { return i >= _locoff && i < _stkoff; }
  bool is_stk(uint i) const { return i >= _stkoff && i < _monoff; }
  bool is_mon(uint i) const { return i >= _monoff && i < _scloff; }
  bool is_scl(uint i) const { return i >= _scloff && i < _endoff; }

  uint sp()  const { return _sp; }
  int  bci() const { return _bci; }
  bool should_reexecute()       const { return _reexecute == Reexecute_True; }
  bool is_reexecute_undefined() const { return _reexecute == Reexecute_Undefined; }
  bool has_method()             const { return _method != NULL; }
  ciMethod*      method() const { assert(has_method(), ""); return _method; }
  JVMState*      caller() const { return _caller; }
  SafePointNode* map()    const { return _map; }
  uint           depth()  const { return _depth; }
  uint debug_start() const; // returns locoff of root caller
  uint debug_end()   const; // returns endoff of self
  uint debug_size()  const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint debug_depth() const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                                  || (caller() && caller()->is_monitor_use(off)); }

  // Initialization functions for the JVM
  void set_locoff(uint off) { _locoff = off; }
  void set_stkoff(uint off) { _stkoff = off; }
  void set_monoff(uint off) { _monoff = off; }
  void set_scloff(uint off) { _scloff = off; }
  void set_endoff(uint off) { _endoff = off; }
  void set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void set_map(SafePointNode *map) { _map = map; }
  void set_sp(uint sp) { _sp = sp; }
  // Note: _reexecute should always be undefined when a new _bci is set
  void set_bci(int bci) { assert(_reexecute == Reexecute_Undefined || _bci == bci, "sanity check"); _bci = bci; }
  void set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }
  void set_reexecute_undefined() { _reexecute = Reexecute_Undefined; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller

#ifndef PRODUCT
  void format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void dump_spec(outputStream *st) const;
  void dump_on(outputStream* st) const;
  void dump() const {
    dump_on(tty);
  }
#endif
};

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _jvms(jvms),
      _oop_map(NULL),
      _adr_type(adr_type)
  {
    init_class_id(Class_SafePoint);
  }

  OopMap*         _oop_map;  // Array of OopMap info (8-bit char) for GC
  JVMState* const _jvms;     // Pointer to list of JVM State objects
  const TypePtr*  _adr_type; // What type of memory does this node produce?

  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.
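  // An illustrative sketch (not part of the original interface) of reading
  // debug info through the jvms offsets, using the accessors declared just
  // below; 'sfpt' is a hypothetical SafePointNode*:
  //
  //   JVMState* jvms = sfpt->jvms();
  //   Node* loc0 = sfpt->local(jvms, 0);       // == sfpt->in(jvms->locoff() + 0)
  //   Node* box0 = sfpt->monitor_box(jvms, 0); // first monitor's box node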
  virtual JVMState* jvms() const { return _jvms; }
  void set_jvms(JVMState* s) {
    *(JVMState**)&_jvms = s; // override const attribute in the accessor
  }
  OopMap *oop_map() const { return _oop_map; }
  void set_oop_map(OopMap *om) { _oop_map = om; }

  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void set_local(JVMState* jvms, uint idx, Node *c);

  void set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0) grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor ();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control( Node *c ) { set_req(TypeFunc::Control,c); }
  void set_i_o    ( Node *c ) { set_req(TypeFunc::I_O    ,c); }
  void set_memory ( Node *c ) { set_req(TypeFunc::Memory ,c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here. (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode* next_exception() const;
  void set_next_exception(SafePointNode* n);
  bool has_exceptions() const { return next_exception() != NULL; }

  // Standard Node stuff
  virtual int Opcode() const;
  virtual bool pinned() const { return true; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual const Type *bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr *adr_type() const { return _adr_type; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node *Identity( PhaseTransform *phase );
  virtual uint ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint match_edge(uint idx) const;

  static bool needs_polling_address_input();

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.

class SafePointScalarObjectNode: public TypeNode {
  uint _first_index; // First input edge index of a SafePoint node where
                     // states of the scalarized object fields are collected.
  uint _n_fields;    // Number of non-static fields of the scalarized object.
  DEBUG_ONLY(AllocateNode* _alloc;)
public:
  SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                            AllocateNode* alloc,
#endif
                            uint first_index, uint n_fields);
  virtual int Opcode() const;
  virtual uint ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint match_edge(uint idx) const;

  uint first_index() const { return _first_index; }
  uint n_fields()    const { return _n_fields; }
  DEBUG_ONLY(AllocateNode* alloc() const { return _alloc; })

  // A SafePointScalarObject should always be pinned to the control edge
  // of the SafePoint node for which it was generated.
  virtual bool pinned() const; // { return true; }

  // A SafePointScalarObject depends on the SafePoint node
  // for which it was generated.
  virtual bool depends_only_on_test() const; // { return false; }

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s". But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj". Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call". Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(int jvms_adj, Dict* sosn_map) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
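//
// An illustrative sketch (an assumption, not part of the original header)
// of how the results of a call are reached in the graph: a CallNode is a
// MultiNode, so its values hang off it as ProjNodes. Given a hypothetical
// CallNode* 'call':
//
//   Node* ctl = call->proj_out(TypeFunc::Control); // control result
//   Node* res = call->proj_out(TypeFunc::Parms);   // value result, if any
//
// (proj_out is inherited from MultiNode and yields NULL when no such
// projection exists.)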
class CallNode : public SafePointNode {
public:
  const TypeFunc *_tf;          // Function type
  address         _entry_point; // Address of method being called
  float           _cnt;         // Estimate of number of times called

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
    : SafePointNode(tf->domain()->cnt(), NULL, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN)
  {
    init_class_id(Class_Call);
    init_flags(Flag_is_Call);
  }

  const TypeFunc* tf()          const { return _tf; }
  const address   entry_point() const { return _entry_point; }
  const float     cnt()         const { return _cnt; }

  void set_tf(const TypeFunc* tf) { _tf = tf; }
  void set_entry_point(address p) { _entry_point = p; }
  void set_cnt(float c)           { _cnt = c; }

  virtual const Type *bottom_type() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase ) { return this; }
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const = 0;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint? Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool guaranteed_safepoint() { return true; }
  // For macro nodes, the JVMState gets modified during expansion, so when cloning
  // the node the JVMState must be cloned.
  virtual void clone_jvms() { } // default is not to clone

  // Returns true if the call may modify n
  virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase);
  // Does this node have a use of n other than in debug information?
  bool has_non_debug_use(Node *n);
  // Returns the unique CheckCastPP of a call, or the result projection
  // if there are several CheckCastPPs, or NULL if there is none.
  Node *result_cast();

  virtual uint match_edge(uint idx) const;

#ifndef PRODUCT
  virtual void dump_req() const;
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention. (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool      _optimized_virtual;
  ciMethod* _method;            // Method being direct called
public:
  const int _bci;               // Byte Code Index of call byte code
  CallJavaNode(const TypeFunc* tf , address addr, ciMethod* method, int bci)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _method(method), _bci(bci), _optimized_virtual(false)
  {
    init_class_id(Class_CallJava);
  }

  virtual int Opcode() const;
  ciMethod* method() const            { return _method; }
  void set_method(ciMethod *m)        { _method = m; }
  void set_optimized_virtual(bool f)  { _optimized_virtual = f; }
  bool is_optimized_virtual() const   { return _optimized_virtual; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallJavaNode(tf, addr, method, bci), _name(NULL) {
    init_class_id(Class_CallStaticJava);
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
                     const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL, bci), _name(name) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
  }
  const char *_name; // Runtime wrapper name

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);

  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode( const TypeFunc *tf , address addr, ciMethod* method, int vtable_index, int bci )
    : CallJavaNode(tf,addr,method,bci), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  int _vtable_index;
  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type)
    : CallNode(tf, addr, adr_type),
      _name(name)
  {
    init_class_id(Class_CallRuntime);
  }

  const char *_name; // Printable name, if _method is NULL
  virtual int Opcode() const;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int Opcode() const;
  virtual bool guaranteed_safepoint() { return false; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
  }
  virtual int Opcode() const;
};


//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
// AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
// get expanded into a code sequence containing a call. Unlike other CallNodes,
// they have 2 memory projections and 2 i_o projections (which are distinguished by
// the _is_io_use flag in the projection.) This is needed when expanding the node in
// order to differentiate the uses of the projection on the normal control path from
// those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress = TypeFunc::Parms, // the newly-allocated raw address
    // Inputs:
    AllocSize = TypeFunc::Parms,  // size (in bytes) of the new object
    KlassNode,                    // type (maybe dynamic) of the obj.
    InitialTest,                  // slow-path test (may be constant)
    ALength,                      // array length (or TOP if none)
    ParmLimit
  };

  static const TypeFunc* alloc_type() {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]   = TypeInt::POS;
    fields[KlassNode]   = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength]     = TypeInt::INT; // length (can be a bad length)

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }

  bool _is_scalar_replaceable; // Result of Escape Analysis

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms() {
    set_jvms(jvms()->clone_deep(Compile::current()));
  }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool guaranteed_safepoint() { return false; }

  // allocations do not modify their arguments
  virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase) { return false; }

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note: This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note: AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(KlassNode);
  }

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                    Node* size, Node* klass_node, Node* initial_test,
                    Node* count_val
                    )
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength, count_val);
  }
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }
};

//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  bool _eliminate; // indicates this lock can be safely eliminated
  bool _coarsened; // indicates this lock was coarsened
#ifndef PRODUCT
  NamedCounter* _counter;
#endif

protected:
  // helper functions for lock elimination
  //

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);


public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
      _coarsened(false),
      _eliminate(false)
  {
#ifndef PRODUCT
    _counter = NULL;
#endif
  }
  virtual int Opcode() const = 0;
  Node *obj_node() const      { return in(TypeFunc::Parms + 0); }
  Node *box_node() const      { return in(TypeFunc::Parms + 1); }
  Node *fastlock_node() const { return in(TypeFunc::Parms + 2); }
  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC; }

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated() { return _eliminate; }
  // mark node as eliminated and update the counter if there is one
  void set_eliminated();

  bool is_coarsened()  { return _coarsened; }
  void set_coarsened() { _coarsened = true; }

  // locking does not modify its arguments
  virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase) { return false; }

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
#endif
};

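// An illustrative usage sketch (not part of the original header) for the
// allocation pattern-matchers declared above; 'ptr' and 'phase' are
// hypothetical:
//
//   intptr_t offset = 0;
//   AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr, phase, offset);
//   if (alloc != NULL) {
//     Node* klass_node = alloc->in(AllocateNode::KlassNode); // allocated type
//   }
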
//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call. This node takes 3 "parameters":
//    0  -  object to lock
//    1  -  a BoxLockNode
//    2  -  a FastLockNode
//
class LockNode : public AbstractLockNode {
public:

  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;   // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;        // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);

    return TypeFunc::make(domain,range);
  }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool guaranteed_safepoint() { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms() {
    set_jvms(jvms()->clone_deep(Compile::current()));
  }
};

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool guaranteed_safepoint() { return false; }
};
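
// An illustrative sketch (not part of the original header) of how a lock's
// three "parameters" line up with its input edges, via the accessors
// declared on AbstractLockNode; 'lock' is a hypothetical LockNode*:
//
//   Node* obj = lock->obj_node();      // in(TypeFunc::Parms + 0), object to lock
//   Node* box = lock->box_node();      // in(TypeFunc::Parms + 1), the BoxLockNode
//   Node* flk = lock->fastlock_node(); // in(TypeFunc::Parms + 2), the FastLockNode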