/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_CALLNODE_HPP
#define SHARE_VM_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class Chaitin;
class NamedCounter;
class MultiNode;
class SafePointNode;
class CallNode;
class CallJavaNode;
class CallStaticJavaNode;
class CallDynamicJavaNode;
class CallRuntimeNode;
class CallLeafNode;
class CallLeafNoFPNode;
class AllocateNode;
class AllocateArrayNode;
class BoxLockNode;
class LockNode;
class UnlockNode;
class JVMState;
class OopMap;
class State;
class StartNode;
class MachCallNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; };
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int Opcode() const;
  static const TypeTuple *osr_domain();
};

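// Example (sketch): during parsing, incoming argument i of the compiled
// method is materialized as a ParmNode projecting from the StartNode (see
// ParmNode below); 'gvn', 'C' and 'start' stand for the current PhaseGVN,
// Compile and method-entry StartNode:
//
//   Node* parm = gvn.transform(new (C) ParmNode(start, TypeFunc::Parms + i));
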
//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site. Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode. Restores registers and
// unwinds stack. Rethrow happens in the caller's method.
class RethrowNode : public Node {
public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit. (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
class JVMState : public ResourceObj {
  friend class VMStructs;
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false -- do not reexecute
    Reexecute_True      =  1  // true -- reexecute the bytecode
  } ReexecuteState; // Reexecute State

private:
  JVMState*      _caller;    // List pointer for forming scope chains
  uint           _depth;     // One more than caller depth, or one.
  uint           _locoff;    // Offset to locals in input edge mapping
  uint           _stkoff;    // Offset to stack in input edge mapping
  uint           _monoff;    // Offset to monitors in input edge mapping
  uint           _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint           _endoff;    // Offset to end of input edge mapping
  uint           _sp;        // Java Expression Stack Pointer for this state
  int            _bci;       // Byte Code Index of this JVM point
  ReexecuteState _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*      _method;    // Method Pointer
  SafePointNode* _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size); // root state; has a null method

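  // Example (sketch): an inlined call produces a two-deep scope chain; the
  // callee's state points back at the caller's ('C' is the current Compile,
  // 'outer'/'inner' are the caller and inlined methods):
  //
  //   JVMState* caller_jvms = new (C) JVMState(outer, NULL);
  //   JVMState* callee_jvms = new (C) JVMState(inner, caller_jvms);
  //   assert(callee_jvms->depth() == caller_jvms->depth() + 1, "scope chain");
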
  // Access functions for the JVM
  // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
  //       \ locoff    \ stkoff    \ argoff    \ monoff    \ scloff    \ endoff
  uint locoff() const { return _locoff; }
  uint stkoff() const { return _stkoff; }
  uint argoff() const { return _stkoff + _sp; }
  uint monoff() const { return _monoff; }
  uint scloff() const { return _scloff; }
  uint endoff() const { return _endoff; }
  uint oopoff() const { return debug_end(); }

  int loc_size() const { return stkoff() - locoff(); }
  int stk_size() const { return monoff() - stkoff(); }
  int mon_size() const { return scloff() - monoff(); }
  int scl_size() const { return endoff() - scloff(); }

  bool is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
  bool is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
  bool is_mon(uint i) const { return monoff() <= i && i < scloff(); }
  bool is_scl(uint i) const { return scloff() <= i && i < endoff(); }

  uint sp()  const { return _sp; }
  int  bci() const { return _bci; }
  bool should_reexecute()       const { return _reexecute == Reexecute_True; }
  bool is_reexecute_undefined() const { return _reexecute == Reexecute_Undefined; }
  bool has_method()             const { return _method != NULL; }
  ciMethod*      method() const { assert(has_method(), ""); return _method; }
  JVMState*      caller() const { return _caller; }
  SafePointNode* map()    const { return _map; }
  uint           depth()  const { return _depth; }
  uint debug_start() const; // returns locoff of root caller
  uint debug_end()   const; // returns endoff of self
  uint debug_size()  const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint debug_depth() const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                                  || (caller() && caller()->is_monitor_use(off)); }

  // Initialization functions for the JVM
  void set_locoff(uint off) { _locoff = off; }
  void set_stkoff(uint off) { _stkoff = off; }
  void set_monoff(uint off) { _monoff = off; }
  void set_scloff(uint off) { _scloff = off; }
  void set_endoff(uint off) { _endoff = off; }
  void set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void set_map(SafePointNode *map) { _map = map; }
  void set_sp(uint sp) { _sp = sp; }
  // _reexecute is initialized to "undefined" for a new bci
  void set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
  void set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }
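
  // Example (sketch): for a state with 3 locals, a stack region of size 2
  // (sp == 2), one monitor, and no scalarized fields, starting at
  // locoff == 4 (the values are illustrative only):
  //
  //   locoff == 4                               // locals occupy edges 4..6
  //   stkoff == locoff + loc_size() == 7        // stack occupies edges 7..8
  //   argoff == stkoff + sp         == 9        // outgoing args begin here
  //   monoff == 9                               // monitor 0 is the (box, obj) pair (9, 10)
  //   scloff == monoff + (1 << logMonitorEdges) == 11
  //   endoff == scloff == 11                    // no scalar-replaced fields
  //
  //   so monitor_box_offset(0) == 9 and monitor_obj_offset(0) == 10.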

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
  void      set_map_deep(SafePointNode *map);// reset map for all callers
  void      adapt_position(int delta);       // Adapt offsets in the input array after adding an edge.
  int       interpreter_frame_size() const;

#ifndef PRODUCT
  void format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void dump_spec(outputStream *st) const;
  void dump_on(outputStream* st) const;
  void dump() const {
    dump_on(tty);
  }
#endif
};

// During parsing, when a node is "improved",
// GraphKit::replace_in_map() is called to update the current map so
// that the improved node is used from that point
// on. GraphKit::replace_in_map() doesn't operate on the callers' maps
// and so some optimization opportunities may be lost. The
// ReplacedNodes class addresses that problem.
//
// A ReplacedNodes object is a list of pairs of nodes. Every
// SafePointNode carries a ReplacedNodes object. Every time
// GraphKit::replace_in_map() is called, a new pair of nodes is pushed
// on the list of replaced nodes. When control flow paths merge, their
// replaced nodes are also merged. When parsing exits a method to
// return to a caller, the replaced nodes on the exit path are used to
// update the caller's map.
class ReplacedNodes VALUE_OBJ_CLASS_SPEC {
 private:
  class ReplacedNode VALUE_OBJ_CLASS_SPEC {
   private:
    Node* _initial;
    Node* _improved;
   public:
    ReplacedNode() : _initial(NULL), _improved(NULL) {}
    ReplacedNode(Node* initial, Node* improved) : _initial(initial), _improved(improved) {}
    Node* initial() const  { return _initial; }
    Node* improved() const { return _improved; }

    bool operator==(const ReplacedNode& other) {
      return _initial == other._initial && _improved == other._improved;
    }
  };
  GrowableArray<ReplacedNode>* _replaced_nodes;

  void allocate_if_necessary();
  bool has_node(ReplacedNode r) const;
  bool has_target_node(Node* n) const;

 public:
  ReplacedNodes()
    : _replaced_nodes(NULL) {}

  void clone();
  void record(Node* initial, Node* improved);
  void transfer_from(ReplacedNodes other, uint idx);
  void reset();
  void apply(Node* n);
  void merge_with(ReplacedNodes other);
  bool is_empty() const;
  void dump(outputStream *st) const;
  void apply(Compile* C, Node* ctl);
};
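
// Example (sketch): after a null check proves 'obj' is non-null, parsing can
// record the (initial, improved) pair so that callers benefit when the
// inlined method exits ('kit' is the current GraphKit; names illustrative):
//
//   Node* cast = kit.gvn().transform(new (C) CastPPNode(obj, TypePtr::NOTNULL));
//   kit.replace_in_map(obj, cast);  // also records (obj, cast) in the map's
//                                   // ReplacedNodes list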

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _jvms(jvms),
      _oop_map(NULL),
      _adr_type(adr_type)
  {
    init_class_id(Class_SafePoint);
  }

  OopMap*         _oop_map;        // Array of OopMap info (8-bit char) for GC
  JVMState* const _jvms;           // Pointer to list of JVM State objects
  const TypePtr*  _adr_type;       // What type of memory does this node produce?
  ReplacedNodes   _replaced_nodes; // During parsing: list of pairs of nodes from calls to GraphKit::replace_in_map()

  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.

  virtual JVMState* jvms() const { return _jvms; }
  void set_jvms(JVMState* s) {
    *(JVMState**)&_jvms = s; // override const attribute in the accessor
  }
  OopMap *oop_map() const { return _oop_map; }
  void set_oop_map(OopMap *om) { _oop_map = om; }

 private:
  void verify_input(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    Node* n = in(idx);
    assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
           in(idx + 1)->is_top(), "2nd half of long/double");
  }

 public:
  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->locoff() + idx);
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->stkoff() + idx);
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->argoff() + idx);
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void set_local(JVMState* jvms, uint idx, Node *c);

  void set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0) grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor ();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control( Node *c ) { set_req(TypeFunc::Control,c); }
  void set_i_o    ( Node *c ) { set_req(TypeFunc::I_O    ,c); }
  void set_memory ( Node *c ) { set_req(TypeFunc::Memory ,c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }
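
  // Example (sketch): reading interpreter state back out of a map during
  // parsing ('map' is the current SafePointNode):
  //
  //   JVMState* jvms = map->jvms();
  //   Node* receiver = map->local(jvms, 0);              // local slot 0
  //   Node* tos      = map->stack(jvms, jvms->sp() - 1); // top of stack
  //   Node* box0     = map->monitor_box(jvms, 0);        // first monitor's BoxLock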

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here. (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode* next_exception() const;
  void           set_next_exception(SafePointNode* n);
  bool           has_exceptions() const { return next_exception() != NULL; }

  // Helper methods to operate on replaced nodes
  ReplacedNodes replaced_nodes() const {
    return _replaced_nodes;
  }

  void set_replaced_nodes(ReplacedNodes replaced_nodes) {
    _replaced_nodes = replaced_nodes;
  }

  void clone_replaced_nodes() {
    _replaced_nodes.clone();
  }
  void record_replaced_node(Node* initial, Node* improved) {
    _replaced_nodes.record(initial, improved);
  }
  void transfer_replaced_nodes_from(SafePointNode* sfpt, uint idx = 0) {
    _replaced_nodes.transfer_from(sfpt->_replaced_nodes, idx);
  }
  void delete_replaced_nodes() {
    _replaced_nodes.reset();
  }
  void apply_replaced_nodes() {
    _replaced_nodes.apply(this);
  }
  void merge_replaced_nodes_with(SafePointNode* sfpt) {
    _replaced_nodes.merge_with(sfpt->_replaced_nodes);
  }
  bool has_replaced_nodes() const {
    return !_replaced_nodes.is_empty();
  }

  // Standard Node stuff
  virtual int            Opcode() const;
  virtual bool           pinned() const { return true; }
  virtual const Type    *Value( PhaseTransform *phase ) const;
  virtual const Type    *bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr *adr_type() const { return _adr_type; }
  virtual Node          *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node          *Identity( PhaseTransform *phase );
  virtual uint           ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  static bool needs_polling_address_input();

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

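// Example (sketch): walking the exception states recorded on a map (see
// next_exception() above); each link is itself a SafePointNode:
//
//   for (SafePointNode* ex = map->next_exception();
//        ex != NULL;
//        ex = ex->next_exception()) {
//     // ... merge 'ex' into the surrounding exception-handling paths ...
//   }
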
//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.

class SafePointScalarObjectNode: public TypeNode {
  uint _first_index; // First input edge relative index of a SafePoint node where
                     // states of the scalarized object fields are collected.
                     // It is relative to the last (youngest) jvms->_scloff.
  uint _n_fields;    // Number of non-static fields of the scalarized object.
  DEBUG_ONLY(AllocateNode* _alloc;)

  virtual uint hash() const ; // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const;

  uint first_index() const { return _first_index; }

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                            AllocateNode* alloc,
#endif
                            uint first_index, uint n_fields);
  virtual int Opcode() const;
  virtual uint           ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  uint first_index(JVMState* jvms) const {
    assert(jvms != NULL, "missed JVMS");
    return jvms->scloff() + _first_index;
  }
  uint n_fields() const { return _n_fields; }

#ifdef ASSERT
  AllocateNode* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s". But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj". Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call". Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(Dict* sosn_map) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


// Simple container for the outgoing projections of a call. Useful
// for serious surgery on calls.
class CallProjections : public StackObj {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* resproj;
  Node* exobj;
};

class CallGenerator;

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
  friend class VMStructs;
public:
  const TypeFunc* _tf;          // Function type
  address         _entry_point; // Address of method being called
  float           _cnt;         // Estimate of number of times called
  CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
    : SafePointNode(tf->domain()->cnt(), NULL, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(NULL)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()          const { return _tf; }
  const address   entry_point() const { return _entry_point; }
  const float     cnt()         const { return _cnt; }
  CallGenerator*  generator()   const { return _generator; }

  void set_tf(const TypeFunc* tf)       { _tf = tf; }
  void set_entry_point(address p)       { _entry_point = p; }
  void set_cnt(float c)                 { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type *bottom_type() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node *Identity( PhaseTransform *phase ) { return this; }
  virtual uint  cmp( const Node &n ) const;
  virtual uint  size_of() const = 0;
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint  ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint? Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool guaranteed_safepoint() { return true; }
  // For macro nodes, the JVMState gets modified during expansion. If calls
  // use MachConstantBase, it gets modified during matching. So when cloning
  // the node the JVMState must be cloned. Default is not to clone.
  virtual void clone_jvms(Compile* C) {
    if (C->needs_clone_jvms() && jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  // Returns true if the call may modify n
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase);
  // Does this node have a use of n other than in debug information?
  bool has_non_debug_use(Node *n);
  // Returns the unique CheckCastPP of a call, or the result projection if
  // there are several CheckCastPPs, or NULL if there is none.
  Node *result_cast();
  // Does this node return a pointer?
  bool returns_pointer() const {
    const TypeTuple *r = tf()->range();
    return (r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else. Used by macro expansion
  // and the late inlining support.
  void extract_projections(CallProjections* projs, bool separate_io_proj);

  virtual uint match_edge(uint idx) const;

#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
  virtual void dump_spec(outputStream *st) const;
#endif
};

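// Example (sketch): the typical surgery pattern used when replacing a call --
// collect its projections first, then rewire them ('call' is some CallNode):
//
//   CallProjections projs;
//   call->extract_projections(&projs, true /*separate_io_proj*/);
//   // projs.fallthrough_catchproj, projs.fallthrough_memproj, projs.resproj,
//   // etc. can now be disconnected from the call and wired to its replacement.
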

//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention. (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
  friend class VMStructs;
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool      _optimized_virtual;
  bool      _method_handle_invoke;
  ciMethod* _method;            // Method being direct called
public:
  const int _bci;               // Byte Code Index of call byte code
  CallJavaNode(const TypeFunc* tf , address addr, ciMethod* method, int bci)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _method(method), _bci(bci),
      _optimized_virtual(false),
      _method_handle_invoke(false)
  {
    init_class_id(Class_CallJava);
  }

  virtual int Opcode() const;
  ciMethod* method() const               { return _method; }
  void set_method(ciMethod *m)           { _method = m; }
  void set_optimized_virtual(bool f)     { _optimized_virtual = f; }
  bool is_optimized_virtual() const      { return _optimized_virtual; }
  void set_method_handle_invoke(bool f)  { _method_handle_invoke = f; }
  bool is_method_handle_invoke() const   { return _method_handle_invoke; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallJavaNode(tf, addr, method, bci), _name(NULL) {
    init_class_id(Class_CallStaticJava);
    if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
                     const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL, bci), _name(name) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
  }
  const char *_name; // Runtime wrapper name

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);

  bool is_boxing_method() const {
    return is_macro() && (method() != NULL) && method()->is_boxing_method();
  }
  // Later inlining modifies the JVMState, so we need to clone it
  // when the call node is cloned (because it is a macro node).
  virtual void clone_jvms(Compile* C) {
    if ((jvms() != NULL) && is_boxing_method()) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

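// Example (sketch): distinguishing an uncommon trap from an ordinary static
// call; a non-zero request code encodes the deoptimization reason and action:
//
//   if (call->is_CallStaticJava()) {
//     int req = call->as_CallStaticJava()->uncommon_trap_request();
//     if (req != 0) {
//       // this call deoptimizes to the interpreter instead of returning
//     }
//   }
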
//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode( const TypeFunc *tf , address addr, ciMethod* method, int vtable_index, int bci ) : CallJavaNode(tf,addr,method,bci), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  int _vtable_index;
  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type)
    : CallNode(tf, addr, adr_type),
      _name(name)
  {
    init_class_id(Class_CallRuntime);
  }

  const char *_name; // Printable name, if _method is NULL
  virtual int Opcode() const;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int Opcode() const;
  virtual bool guaranteed_safepoint() { return false; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
  }
  virtual int Opcode() const;
};

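// Example (sketch): building a leaf call during parsing; RC_LEAF asks for a
// CallLeafNode, which never safepoints ('kit' is a GraphKit, and the entry
// point, type and arguments are illustrative):
//
//   kit.make_runtime_call(GraphKit::RC_LEAF | GraphKit::RC_NO_FP,
//                         OptoRuntime::g1_wb_pre_Type(),
//                         CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre),
//                         "g1_wb_pre", TypeRawPtr::BOTTOM,
//                         pre_val, thread);
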

//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
// AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
// get expanded into a code sequence containing a call. Unlike other CallNodes,
// they have 2 memory projections and 2 i_o projections (which are distinguished by
// the _is_io_use flag in the projection.) This is needed when expanding the node in
// order to differentiate the uses of the projection on the normal control path from
// those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress = TypeFunc::Parms, // the newly-allocated raw address
    // Inputs:
    AllocSize = TypeFunc::Parms,  // size (in bytes) of the new object
    KlassNode,                    // type (maybe dynamic) of the obj.
    InitialTest,                  // slow-path test (may be constant)
    ALength,                      // array length (or TOP if none)
    ParmLimit
  };

  static const TypeFunc* alloc_type(const Type* t) {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]   = TypeInt::POS;
    fields[KlassNode]   = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength]     = t; // length (can be a bad length)

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool guaranteed_safepoint() { return false; }

  // allocations do not modify their arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false;}

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note: This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note: AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(KlassNode);
  }

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);
};
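
// Example (sketch): pattern-matching an allocation site from a pointer that
// may address a field of the new object, as optimizations such as escape
// analysis do ('phase' is the current PhaseTransform):
//
//   intptr_t offset;
//   AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr, phase, offset);
//   if (alloc != NULL) {
//     Node* klass = AllocateNode::Ideal_klass(ptr, phase); // klass operand
//     // 'ptr' addresses offset 'offset' within the freshly allocated object
//   }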

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                    Node* size, Node* klass_node, Node* initial_test,
                    Node* count_val
                    )
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength, count_val);
  }
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }
};

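// Example (sketch): recovering the length of a freshly allocated array from
// a pointer into it ('phase' is the current PhaseTransform):
//
//   AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr, phase);
//   Node* length = (alloc == NULL) ? NULL : alloc->Ideal_length();
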
//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  enum {
    Regular = 0,  // Normal lock
    NonEscObj,    // Lock is used for non-escaping object
    Coarsened,    // Lock was coarsened
    Nested        // Nested lock
  } _kind;
#ifndef PRODUCT
  NamedCounter* _counter;
#endif

protected:
  // helper functions for lock elimination
  //

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

  // Update the counter to indicate that this lock was eliminated.
  void set_eliminated_lock_counter() PRODUCT_RETURN;

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
      _kind(Regular)
  {
#ifndef PRODUCT
    _counter = NULL;
#endif
  }
  virtual int Opcode() const = 0;
  Node *obj_node() const      { return in(TypeFunc::Parms + 0); }
  Node *box_node() const      { return in(TypeFunc::Parms + 1); }
  Node *fastlock_node() const { return in(TypeFunc::Parms + 2); }
  void  set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC;}

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()  const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened()   const { return (_kind == Coarsened); }
  bool is_nested()      const { return (_kind == Nested); }

  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested;    set_eliminated_lock_counter(); }

  // locking does not modify its arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase){ return false;}

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
#endif
};

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call. This node takes 3 "parameters":
//    0 - object to lock
//    1 - a BoxLockNode
//    2 - a FastLockNode
//
class LockNode : public AbstractLockNode {
public:

  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;   // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;        // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);

    return TypeFunc::make(domain,range);
  }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool guaranteed_safepoint() { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  bool is_nested_lock_region(); // Is this Lock nested?
};
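
// Example (sketch): a LockNode's inputs line up with lock_type()'s domain,
// so the AbstractLockNode accessors above simply index past TypeFunc::Parms:
//
//   lock->obj_node()      == lock->in(TypeFunc::Parms + 0)  // object to lock
//   lock->box_node()      == lock->in(TypeFunc::Parms + 1)  // BoxLockNode
//   lock->fastlock_node() == lock->in(TypeFunc::Parms + 2)  // FastLockNode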

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool guaranteed_safepoint() { return false; }
};

#endif // SHARE_VM_OPTO_CALLNODE_HPP