/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_CALLNODE_HPP
#define SHARE_VM_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class Chaitin;
class NamedCounter;
class MultiNode;
class SafePointNode;
class CallNode;
class CallJavaNode;
class CallStaticJavaNode;
class CallDynamicJavaNode;
class CallRuntimeNode;
class CallLeafNode;
class CallLeafNoFPNode;
class AllocateNode;
class AllocateArrayNode;
class LockNode;
class UnlockNode;
class JVMState;
class OopMap;
class State;
class StartNode;
class MachCallNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0, this);
    init_req(1, root);
  }
  virtual int   Opcode() const;
  virtual bool  pinned() const { return true; }
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint  ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int   Opcode() const;
  static  const TypeTuple *osr_domain();
};


//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src, con) {
    init_class_id(Class_Parm);
  }
  virtual int   Opcode() const;
  virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint  ideal_reg() const;
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};


//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int   Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint  hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool  depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint  ideal_reg() const { return NotAMachineReg; }
  virtual uint  match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void  dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site.  Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode.  Restores registers and
// unwinds stack.  Rethrow happens in the caller's method.
class RethrowNode : public Node {
public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int   Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint  hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool  depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint  match_edge(uint idx) const;
  virtual uint  ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void  dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int   Opcode() const;
  virtual uint  match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int   Opcode() const;
  virtual uint  match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit.  (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
class JVMState : public ResourceObj {
  friend class VMStructs;
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false       -- do not reexecute
    Reexecute_True      =  1  // true        -- reexecute the bytecode
  } ReexecuteState; // Reexecute state

private:
  JVMState*         _caller;    // List pointer for forming scope chains
  uint              _depth;     // One more than caller depth, or one.
  uint              _locoff;    // Offset to locals in input edge mapping
  uint              _stkoff;    // Offset to stack in input edge mapping
  uint              _monoff;    // Offset to monitors in input edge mapping
  uint              _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint              _endoff;    // Offset to end of input edge mapping
  uint              _sp;        // Java Expression Stack Pointer for this state
  int               _bci;       // Byte Code Index of this JVM point
  ReexecuteState    _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*         _method;    // Method Pointer
  SafePointNode*    _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size); // root state; has a null method

  // Access functions for the JVM
  // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
  //       \ locoff    \ stkoff    \ argoff    \ monoff    \ scloff    \ endoff
  uint              locoff() const { return _locoff; }
  uint              stkoff() const { return _stkoff; }
  uint              argoff() const { return _stkoff + _sp; }
  uint              monoff() const { return _monoff; }
  uint              scloff() const { return _scloff; }
  uint              endoff() const { return _endoff; }
  uint              oopoff() const { return debug_end(); }

  int               loc_size() const { return stkoff() - locoff(); }
  int               stk_size() const { return monoff() - stkoff(); }
  int               arg_size() const { return monoff() - argoff(); }
  int               mon_size() const { return scloff() - monoff(); }
  int               scl_size() const { return endoff() - scloff(); }

  bool              is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
  bool              is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
  bool              is_mon(uint i) const { return monoff() <= i && i < scloff(); }
  bool              is_scl(uint i) const { return scloff() <= i && i < endoff(); }

  uint              sp()  const { return _sp; }
  int               bci() const { return _bci; }
  bool              should_reexecute() const { return _reexecute == Reexecute_True; }
  bool              is_reexecute_undefined() const { return _reexecute == Reexecute_Undefined; }
  bool              has_method() const { return _method != NULL; }
  ciMethod*         method() const { assert(has_method(), ""); return _method; }
  JVMState*         caller() const { return _caller; }
  SafePointNode*    map()    const { return _map; }
  uint              depth()  const { return _depth; }
  uint              debug_start() const; // returns locoff of root caller
  uint              debug_end()   const; // returns endoff of self
  uint              debug_size()  const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint              debug_depth() const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                             || (caller() && caller()->is_monitor_use(off)); }

  // Initialization functions for the JVM
  void              set_locoff(uint off) { _locoff = off; }
  void              set_stkoff(uint off) { _stkoff = off; }
  void              set_monoff(uint off) { _monoff = off; }
  void              set_scloff(uint off) { _scloff = off; }
  void              set_endoff(uint off) { _endoff = off; }
  void              set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void              set_map(SafePointNode *map) { _map = map; }
  void              set_sp(uint sp) { _sp = sp; }
  // _reexecute is initialized to "undefined" for a new bci
  void              set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
  void              set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }
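
  // A worked example of the edge mapping pictured above (illustrative numbers
  // only, assuming debug info begins at TypeFunc::Parms == 5): for a method
  // with 3 locals, max stack 2, sp() == 1, one monitor, and no scalarized
  // objects, the offsets would be
  //   locoff() == 5                        locals at edges 5..7
  //   stkoff() == 5 + 3             ==  8  stack at edges 8..9
  //   argoff() == stkoff() + sp()   ==  9  outgoing args = top of stack
  //   monoff() == 8 + 2             == 10  one (box, obj) pair at edges 10..11
  //   scloff() == 10 + (1 << logMonitorEdges) == 12
  //   endoff() == scloff()          == 12  (scl_size() == 0)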

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller

#ifndef PRODUCT
  void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void      dump_spec(outputStream *st) const;
  void      dump_on(outputStream* st) const;
  void      dump() const {
    dump_on(tty);
  }
#endif
};

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _jvms(jvms),
      _oop_map(NULL),
      _adr_type(adr_type)
  {
    init_class_id(Class_SafePoint);
  }

  OopMap*         _oop_map;   // Array of OopMap info (8-bit char) for GC
  JVMState* const _jvms;      // Pointer to list of JVM State objects
  const TypePtr*  _adr_type;  // What type of memory does this node produce?

  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.

  virtual JVMState* jvms() const { return _jvms; }
  void set_jvms(JVMState* s) {
    *(JVMState**)&_jvms = s;  // override const attribute in the accessor
  }
  OopMap *oop_map() const { return _oop_map; }
  void set_oop_map(OopMap *om) { _oop_map = om; }

private:
  void verify_input(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    Node* n = in(idx);
    assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
           in(idx + 1)->is_top(), "2nd half of long/double");
  }

public:
  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->locoff() + idx);
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->stkoff() + idx);
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->argoff() + idx);
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void  set_local(JVMState* jvms, uint idx, Node *c);

  void  set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void  set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0)  grow_stack(jvms, grow_by);
  }
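
  // Illustrative use of the accessors above (a sketch, not code from this
  // file): given a map and its JVMState, the parser reads and rewrites
  // local slot k roughly as
  //   Node* v = map->local(jvms, k);       // reads edge jvms->locoff() + k
  //   map->set_local(jvms, k, new_value);  // replaces that edge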
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control( Node *c ) { set_req(TypeFunc::Control, c); }
  void set_i_o    ( Node *c ) { set_req(TypeFunc::I_O,     c); }
  void set_memory ( Node *c ) { set_req(TypeFunc::Memory,  c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here.  (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode* next_exception() const;
  void           set_next_exception(SafePointNode* n);
  bool           has_exceptions() const { return next_exception() != NULL; }

  // Standard Node stuff
  virtual int            Opcode() const;
  virtual bool           pinned() const { return true; }
  virtual const Type    *Value( PhaseTransform *phase ) const;
  virtual const Type    *bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr *adr_type() const { return _adr_type; }
  virtual Node          *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node          *Identity( PhaseTransform *phase );
  virtual uint           ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  static bool needs_polling_address_input();

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.

class SafePointScalarObjectNode: public TypeNode {
  uint _first_index; // First input edge index of a SafePoint node where
                     // states of the scalarized object fields are collected.
  uint _n_fields;    // Number of non-static fields of the scalarized object.
  DEBUG_ONLY(AllocateNode* _alloc;)

  virtual uint hash() const; // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const;

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                            AllocateNode* alloc,
#endif
                            uint first_index, uint n_fields);
  virtual int Opcode() const;
  virtual uint           ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  uint first_index() const { return _first_index; }
  uint n_fields()    const { return _n_fields; }

#ifdef ASSERT
  AllocateNode* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s".  But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call".  Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(int jvms_adj, Dict* sosn_map) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


// Simple container for the outgoing projections of a call.  Useful
// for serious surgery on calls.
class CallProjections : public StackObj {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* resproj;
  Node* exobj;
};

class CallGenerator;
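
// Illustrative use of CallProjections (a sketch, not code from this file):
// macro expansion and late inlining typically collect the projections once
// and then rewire them to the replacement subgraph, e.g.
//   CallProjections projs;
//   call->extract_projections(&projs, true /* separate_io_proj */);
//   // ... hook projs.fallthrough_catchproj, projs.catchall_catchproj, etc.
// (extract_projections is declared on CallNode, below.)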

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
  friend class VMStructs;
public:
  const TypeFunc *_tf;          // Function type
  address         _entry_point; // Address of method being called
  float           _cnt;         // Estimate of number of times called
  CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
    : SafePointNode(tf->domain()->cnt(), NULL, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(NULL)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()          const { return _tf; }
  const address   entry_point() const { return _entry_point; }
  const float     cnt()         const { return _cnt; }
  CallGenerator*  generator()   const { return _generator; }

  void set_tf(const TypeFunc* tf)       { _tf = tf; }
  void set_entry_point(address p)       { _entry_point = p; }
  void set_cnt(float c)                 { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type *bottom_type() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node *Identity( PhaseTransform *phase ) { return this; }
  virtual uint  cmp( const Node &n ) const;
  virtual uint  size_of() const = 0;
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint  ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool  guaranteed_safepoint() { return true; }
  // For macro nodes, the JVMState gets modified during expansion, so when cloning
  // the node the JVMState must be cloned.
  virtual void  clone_jvms() { } // default is not to clone

  // Returns true if the call may modify n
  virtual bool  may_modify(const TypePtr *addr_t, PhaseTransform *phase);
  // Does this node have a use of n other than in debug information?
  bool          has_non_debug_use(Node *n);
  // Returns the unique CheckCastPP of a call,
  // or the result projection if there are several CheckCastPPs,
  // or NULL if there is none.
  Node *result_cast();
  // Does this node return a pointer?
  bool returns_pointer() const {
    const TypeTuple *r = tf()->range();
    return (r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else.  Used by macro expansion
  // and the late inlining support.
  void extract_projections(CallProjections* projs, bool separate_io_proj);

  virtual uint match_edge(uint idx) const;

#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
  virtual void dump_spec(outputStream *st) const;
#endif
};


//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention.  (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
  friend class VMStructs;
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool      _optimized_virtual;
  bool      _method_handle_invoke;
  ciMethod* _method;            // Method being direct called
public:
  const int _bci;               // Byte Code Index of call byte code
  CallJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _method(method), _bci(bci),
      _optimized_virtual(false),
      _method_handle_invoke(false)
  {
    init_class_id(Class_CallJava);
  }

  virtual int Opcode() const;
  ciMethod* method() const                { return _method; }
  void  set_method(ciMethod *m)           { _method = m; }
  void  set_optimized_virtual(bool f)     { _optimized_virtual = f; }
  bool  is_optimized_virtual() const      { return _optimized_virtual; }
  void  set_method_handle_invoke(bool f)  { _method_handle_invoke = f; }
  bool  is_method_handle_invoke() const   { return _method_handle_invoke; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallJavaNode(tf, addr, method, bci), _name(NULL) {
    init_class_id(Class_CallStaticJava);
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
                     const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL, bci), _name(name) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
  }
  const char *_name;            // Runtime wrapper name

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);

  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode( const TypeFunc *tf, address addr, ciMethod* method, int vtable_index, int bci )
    : CallJavaNode(tf, addr, method, bci), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  int _vtable_index;
  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
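
// A note on the uncommon-trap form of CallStaticJavaNode above (informational,
// not code from this file): an uncommon trap is emitted as a call to the
// "uncommon_trap" runtime blob whose argument is a constant "request" word;
// uncommon_trap_request() returns that constant when this call is such a trap,
// and extract_uncommon_trap_request() is the raw decoder.  The meaning of the
// request word (reason and action) is defined by the Deoptimization class.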

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type)
    : CallNode(tf, addr, adr_type),
      _name(name)
  {
    init_class_id(Class_CallRuntime);
  }

  const char *_name;            // Printable name, if _method is NULL
  virtual int  Opcode() const;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int  Opcode() const;
  virtual bool guaranteed_safepoint() { return false; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
  }
  virtual int Opcode() const;
};


//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
// AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
// get expanded into a code sequence containing a call.  Unlike other CallNodes,
// they have 2 memory projections and 2 i_o projections (which are distinguished by
// the _is_io_use flag in the projection.)  This is needed when expanding the node in
// order to differentiate the uses of the projection on the normal control path from
// those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress = TypeFunc::Parms,   // the newly-allocated raw address
    // Inputs:
    AllocSize = TypeFunc::Parms,    // size (in bytes) of the new object
    KlassNode,                      // type (maybe dynamic) of the obj.
    InitialTest,                    // slow-path test (may be constant)
    ALength,                        // array length (or TOP if none)
    ParmLimit
  };

  static const TypeFunc* alloc_type() {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]   = TypeInt::POS;
    fields[KlassNode]   = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength]     = TypeInt::INT;  // length (can be a bad length)

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }

  bool _is_scalar_replaceable;  // Result of Escape Analysis

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms() {
    set_jvms(jvms()->clone_deep(Compile::current()));
  }
  virtual int  Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool guaranteed_safepoint() { return false; }

  // allocations do not modify their arguments
  virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase) { return false; }

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note:  This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note:  AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(KlassNode);
  }

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note:  Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();
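
  // Illustrative use of the pattern-matchers above (a sketch, not code from
  // this file):
  //   intptr_t offset = 0;
  //   AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr, phase, offset);
  //   if (alloc != NULL) {
  //     // 'ptr' points 'offset' bytes into the object allocated by 'alloc'
  //   }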

  // Return the corresponding storestore barrier (or null if none).
  // Walks out edges to find it...
  MemBarStoreStoreNode* storestore();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                    Node* size, Node* klass_node, Node* initial_test,
                    Node* count_val
                    )
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength, count_val);
  }
  virtual int  Opcode() const;
  virtual uint size_of() const; // Size is bigger
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }
};

//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  enum {
    Regular = 0,  // Normal lock
    NonEscObj,    // Lock is used for a non-escaping object
    Coarsened,    // Lock was coarsened
    Nested        // Nested lock
  } _kind;
#ifndef PRODUCT
  NamedCounter* _counter;
#endif

protected:
  // helper functions for lock elimination
  //

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

  // Update the counter to indicate that this lock was eliminated.
  void set_eliminated_lock_counter() PRODUCT_RETURN;

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
      _kind(Regular)
  {
#ifndef PRODUCT
    _counter = NULL;
#endif
  }
  virtual int Opcode() const = 0;
  Node *obj_node() const      { return in(TypeFunc::Parms + 0); }
  Node *box_node() const      { return in(TypeFunc::Parms + 1); }
  Node *fastlock_node() const { return in(TypeFunc::Parms + 2); }
  void  set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC; }

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()  const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened()   const { return (_kind == Coarsened); }
  bool is_nested()      const { return (_kind == Nested); }

  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested;    set_eliminated_lock_counter(); }

  // locking does not modify its arguments
  virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase) { return false; }

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
#endif
};

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call.  This node takes 3 "parameters":
//    0 - object to lock
//    1 - a BoxLockNode
//    2 - a FastLockNode
//
class LockNode : public AbstractLockNode {
public:

  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;         // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

    return TypeFunc::make(domain, range);
  }

  virtual int  Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool guaranteed_safepoint() { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms() {
    set_jvms(jvms()->clone_deep(Compile::current()));
  }

  bool is_nested_lock_region(); // Is this Lock nested?
};
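
// (Informational summary, derived from the declarations above.)  The three
// Parms of lock_type() line up with the AbstractLockNode accessors:
//   in(TypeFunc::Parms + 0) -- obj_node()      object being locked
//   in(TypeFunc::Parms + 1) -- box_node()      stack slot (BoxLockNode)
//   in(TypeFunc::Parms + 2) -- fastlock_node() FastLock result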

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
public:
  virtual int  Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool guaranteed_safepoint() { return false; }
};

#endif // SHARE_VM_OPTO_CALLNODE_HPP