/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_CALLNODE_HPP
#define SHARE_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/replacednodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class Chaitin;
class NamedCounter;
class MultiNode;
class SafePointNode;
class CallNode;
class CallJavaNode;
class CallStaticJavaNode;
class CallDynamicJavaNode;
class CallRuntimeNode;
class CallLeafNode;
class CallLeafNoFPNode;
class AllocateNode;
class AllocateArrayNode;
class BoxLockNode;
class LockNode;
class UnlockNode;
class JVMState;
class OopMap;
class State;
class StartNode;
class MachCallNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0, this);
    init_req(1, root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; }
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int Opcode() const;
};
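
// Illustrative sketch (not part of the original header): incoming values
// enter the graph as ParmNode projections of the StartNode above. For a
// hypothetical method "static int add(int a, int b)" the start of the
// ideal graph looks roughly like:
//
//   Start
//     Parm(TypeFunc::Control)    // control
//     Parm(TypeFunc::I_O)        // i/o effect
//     Parm(TypeFunc::Memory)     // initial memory state
//     Parm(TypeFunc::FramePtr)   // frame pointer
//     Parm(TypeFunc::ReturnAdr)  // return address
//     Parm(TypeFunc::Parms+0)    // a
//     Parm(TypeFunc::Parms+1)    // b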

//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src, con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
  virtual void related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const;
#endif
};


//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site. Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode. Restores registers and
// unwinds stack. Rethrow happens in the caller's method.
class RethrowNode : public Node {
public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop )
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit. (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
class JVMState : public ResourceObj {
  friend class VMStructs;
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false -- do not reexecute
    Reexecute_True      =  1  // true -- reexecute the bytecode
  } ReexecuteState; // Reexecute state

private:
  JVMState*         _caller;    // List pointer for forming scope chains
  uint              _depth;     // One more than caller depth, or one.
  uint              _locoff;    // Offset to locals in input edge mapping
  uint              _stkoff;    // Offset to stack in input edge mapping
  uint              _monoff;    // Offset to monitors in input edge mapping
  uint              _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint              _endoff;    // Offset to end of input edge mapping
  uint              _sp;        // Java Expression Stack Pointer for this state
  int               _bci;       // Byte Code Index of this JVM point
  ReexecuteState    _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*         _method;    // Method Pointer
  SafePointNode*    _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size); // root state; has a null method

  // Access functions for the JVM
  // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
  //       \ locoff    \ stkoff    \ argoff    \ monoff    \ scloff    \ endoff
  uint              locoff() const { return _locoff; }
  uint              stkoff() const { return _stkoff; }
  uint              argoff() const { return _stkoff + _sp; }
  uint              monoff() const { return _monoff; }
  uint              scloff() const { return _scloff; }
  uint              endoff() const { return _endoff; }
  uint              oopoff() const { return debug_end(); }

  int            loc_size() const { return stkoff() - locoff(); }
  int            stk_size() const { return monoff() - stkoff(); }
  int            mon_size() const { return scloff() - monoff(); }
  int            scl_size() const { return endoff() - scloff(); }

  bool        is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
  bool        is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
  bool        is_mon(uint i) const { return monoff() <= i && i < scloff(); }
  bool        is_scl(uint i) const { return scloff() <= i && i < endoff(); }
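
  // Worked example (illustrative, not from the original header): suppose
  // locoff() == 10 for a frame with 3 locals, a 4-slot expression stack
  // with sp() == 2, and one monitor. Then:
  //   stkoff() == 13   (10 + 3 locals)
  //   argoff() == 15   (13 + sp(); outgoing args sit atop the stack)
  //   monoff() == 17   (13 + 4 stack slots)
  //   scloff() == 19   (17 + one (box, obj) monitor pair)
  // so is_stk(14) and is_mon(17) both hold.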

  uint                      sp() const { return _sp; }
  int                      bci() const { return _bci; }
  bool        should_reexecute() const { return _reexecute == Reexecute_True; }
  bool  is_reexecute_undefined() const { return _reexecute == Reexecute_Undefined; }
  bool              has_method() const { return _method != NULL; }
  ciMethod*             method() const { assert(has_method(), ""); return _method; }
  JVMState*             caller() const { return _caller; }
  SafePointNode*           map() const { return _map; }
  uint                   depth() const { return _depth; }
  uint             debug_start() const; // returns locoff of root caller
  uint               debug_end() const; // returns endoff of self
  uint              debug_size() const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint             debug_depth() const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                                  || (caller() && caller()->is_monitor_use(off)); }

  // Initialization functions for the JVM
  void              set_locoff(uint off) { _locoff = off; }
  void              set_stkoff(uint off) { _stkoff = off; }
  void              set_monoff(uint off) { _monoff = off; }
  void              set_scloff(uint off) { _scloff = off; }
  void              set_endoff(uint off) { _endoff = off; }
  void              set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void              set_map(SafePointNode *map) { _map = map; }
  void              set_sp(uint sp) { _sp = sp; }
  // _reexecute is initialized to "undefined" for a new bci
  void              set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
  void              set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;     // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const;  // retains uncloned caller
  void      set_map_deep(SafePointNode *map); // reset map for all callers
  void      adapt_position(int delta);        // adapt offsets in the input array after adding an edge
  int       interpreter_frame_size() const;

#ifndef PRODUCT
  void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void      dump_spec(outputStream *st) const;
  void      dump_on(outputStream* st) const;
  void      dump() const {
    dump_on(tty);
  }
#endif
};

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _oop_map(NULL),
      _jvms(jvms),
      _adr_type(adr_type)
  {
    init_class_id(Class_SafePoint);
  }

  OopMap*         _oop_map;   // Array of OopMap info (8-bit char) for GC
  JVMState* const _jvms;      // Pointer to list of JVM State objects
  const TypePtr*  _adr_type;  // What type of memory does this node produce?
  ReplacedNodes   _replaced_nodes; // During parsing: list of pairs of nodes from calls to GraphKit::replace_in_map()

  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.
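  //
  // For example (illustrative, not in the original header): a stub call
  // that only writes into byte arrays could advertise a narrow adr_type
  // such as TypeAryPtr::BYTES, while a call that may read or write
  // anything uses TypePtr::BOTTOM (all of memory).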

  virtual JVMState* jvms() const { return _jvms; }
  void set_jvms(JVMState* s) {
    *(JVMState**)&_jvms = s;  // override const attribute in the accessor
  }
  OopMap *oop_map() const { return _oop_map; }
  void set_oop_map(OopMap *om) { _oop_map = om; }

private:
  void verify_input(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    Node* n = in(idx);
    assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
           in(idx + 1)->is_top(), "2nd half of long/double");
  }

public:
  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->locoff() + idx);
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->stkoff() + idx);
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->argoff() + idx);
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void set_local(JVMState* jvms, uint idx, Node *c);

  void set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0) grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control (Node *c) { set_req(TypeFunc::Control, c); }
  void set_i_o     (Node *c) { set_req(TypeFunc::I_O,     c); }
  void set_memory  (Node *c) { set_req(TypeFunc::Memory,  c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }
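
  // Usage sketch (illustrative only): given a safepoint 'sfpt' whose
  // attached JVMState is 'jvms', debug values are read through the
  // accessors above, e.g.
  //
  //   Node* receiver = sfpt->local(jvms, 0);               // local slot 0
  //   Node* tos      = sfpt->stack(jvms, jvms->sp() - 1);  // top of stack
  //   Node* box      = sfpt->monitor_box(jvms, 0);         // first lock's box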

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here. (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode* next_exception() const;
  void           set_next_exception(SafePointNode* n);
  bool           has_exceptions() const { return next_exception() != NULL; }

  // Helper methods to operate on replaced nodes
  ReplacedNodes replaced_nodes() const {
    return _replaced_nodes;
  }

  void set_replaced_nodes(ReplacedNodes replaced_nodes) {
    _replaced_nodes = replaced_nodes;
  }

  void clone_replaced_nodes() {
    _replaced_nodes.clone();
  }
  void record_replaced_node(Node* initial, Node* improved) {
    _replaced_nodes.record(initial, improved);
  }
  void transfer_replaced_nodes_from(SafePointNode* sfpt, uint idx = 0) {
    _replaced_nodes.transfer_from(sfpt->_replaced_nodes, idx);
  }
  void delete_replaced_nodes() {
    _replaced_nodes.reset();
  }
  void apply_replaced_nodes(uint idx) {
    _replaced_nodes.apply(this, idx);
  }
  void merge_replaced_nodes_with(SafePointNode* sfpt) {
    _replaced_nodes.merge_with(sfpt->_replaced_nodes);
  }
  bool has_replaced_nodes() const {
    return !_replaced_nodes.is_empty();
  }
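
  // Illustrative note (not from the original header): during parsing,
  // GraphKit::replace_in_map() records (initial, improved) node pairs here,
  // e.g. a receiver and the CheckCastPP that sharpens its type; a late
  // inliner can then call apply_replaced_nodes() to re-apply those
  // improvements to the caller's map.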

  void disconnect_from_root(PhaseIterGVN *igvn);

  // Standard Node stuff
  virtual int             Opcode() const;
  virtual bool            pinned() const { return true; }
  virtual const Type*     Value(PhaseGVN* phase) const;
  virtual const Type*     bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr*  adr_type() const { return _adr_type; }
  virtual Node*           Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node*           Identity(PhaseGVN* phase);
  virtual uint            ideal_reg() const { return 0; }
  virtual const RegMask&  in_RegMask(uint) const;
  virtual const RegMask&  out_RegMask() const;
  virtual uint            match_edge(uint idx) const;

  static bool needs_polling_address_input();

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const;
#endif
};

//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.

class SafePointScalarObjectNode: public TypeNode {
  uint _first_index; // First input edge relative index of a SafePoint node where
                     // states of the scalarized object fields are collected.
                     // It is relative to the last (youngest) jvms->_scloff.
  uint _n_fields;    // Number of non-static fields of the scalarized object.
  DEBUG_ONLY(AllocateNode* _alloc;)

  virtual uint hash() const; // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const;

  uint first_index() const { return _first_index; }

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                            AllocateNode* alloc,
#endif
                            uint first_index, uint n_fields);
  virtual int Opcode() const;
  virtual uint ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint match_edge(uint idx) const;

  uint first_index(JVMState* jvms) const {
    assert(jvms != NULL, "missed JVMS");
    return jvms->scloff() + _first_index;
  }
  uint n_fields() const { return _n_fields; }

#ifdef ASSERT
  AllocateNode* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s". But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj". Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call". Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(Dict* sosn_map) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
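
// Illustrative shape after scalar replacement (not part of the original
// header): if an allocation of a hypothetical "Point { int x; int y; }" is
// eliminated, each safepoint that could deoptimize carries a
// SafePointScalarObjectNode with _n_fields == 2, and the field values are
// the safepoint's inputs at
//   first_index(jvms) + 0   // x
//   first_index(jvms) + 1   // y
// which the runtime uses to rematerialize the object on deoptimization.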

// Simple container for the outgoing projections of a call. Useful
// for serious surgery on calls.
class CallProjections {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* exobj;
  uint  nb_resproj;
  Node* resproj[1]; // at least one projection

  CallProjections(uint nbres) {
    fallthrough_proj      = NULL;
    fallthrough_catchproj = NULL;
    fallthrough_memproj   = NULL;
    fallthrough_ioproj    = NULL;
    catchall_catchproj    = NULL;
    catchall_memproj      = NULL;
    catchall_ioproj       = NULL;
    exobj                 = NULL;
    nb_resproj            = nbres;
    resproj[0]            = NULL;
    for (uint i = 1; i < nb_resproj; i++) {
      resproj[i] = NULL;
    }
  }

};

class CallGenerator;

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
  friend class VMStructs;

protected:
  bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr *t_oop, PhaseTransform *phase);

public:
  const TypeFunc* _tf;          // Function type
  address         _entry_point; // Address of method being called
  float           _cnt;         // Estimate of number of times called
  CallGenerator*  _generator;   // Corresponding CallGenerator for some late inline calls
  const char*     _name;        // Printable name, if _method is NULL

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
    : SafePointNode(tf->domain_cc()->cnt(), NULL, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(NULL),
      _name(NULL)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()          const { return _tf; }
  const address   entry_point() const { return _entry_point; }
  const float     cnt()         const { return _cnt; }
  CallGenerator*  generator()   const { return _generator; }

  void set_tf(const TypeFunc* tf)       { _tf = tf; }
  void set_entry_point(address p)       { _entry_point = p; }
  void set_cnt(float c)                 { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type* bottom_type() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node* Identity(PhaseGVN* phase) { return this; }
  virtual bool  cmp( const Node &n ) const;
  virtual uint  size_of() const = 0;
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
  virtual Node* match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  virtual uint  ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint? Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool  guaranteed_safepoint() { return true; }
  // For macro nodes, the JVMState gets modified during expansion. If calls
  // use MachConstantBase, it gets modified during matching. So when cloning
  // the node the JVMState must be cloned. Default is not to clone.
  virtual void clone_jvms(Compile* C) {
    if (C->needs_clone_jvms() && jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  // Returns true if the call may modify n
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase);
  // Does this node have a use of n other than in debug information?
  bool has_non_debug_use(Node *n);
  bool has_debug_use(Node *n);
  // Returns the unique CheckCastPP of a call, or the result projection if
  // there are several CheckCastPPs, or NULL if there is none.
  Node *result_cast();
  // Does this node return a pointer?
  bool returns_pointer() const {
    const TypeTuple *r = tf()->range_sig();
    return (!tf()->returns_value_type_as_fields() &&
            r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else. Used by macro expansion
  // and the late inlining support.
  CallProjections* extract_projections(bool separate_io_proj, bool do_asserts = true);
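
  // Usage sketch (illustrative only): macro expansion typically grabs the
  // projections first and then rewires them, roughly
  //
  //   CallProjections* projs = call->extract_projections(true /*separate_io_proj*/);
  //   if (projs->fallthrough_catchproj != NULL) {
  //     igvn.replace_node(projs->fallthrough_catchproj, new_control);
  //   }
  //
  // where 'igvn' and 'new_control' are supplied by the caller.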

  virtual uint match_edge(uint idx) const;

  bool is_call_to_arraycopystub() const;

  virtual void copy_call_debug_info(PhaseIterGVN* phase, CallNode *oldcall) {}

#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
  virtual void dump_spec(outputStream *st) const;
#endif
};


//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention. (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
  friend class VMStructs;
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool      _optimized_virtual;
  bool      _method_handle_invoke;
  bool      _override_symbolic_info; // Override symbolic call site info from bytecode
  ciMethod* _method;                 // Method being direct called
public:
  const int _bci; // Byte Code Index of call byte code
  CallJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _optimized_virtual(false),
      _method_handle_invoke(false),
      _override_symbolic_info(false),
      _method(method), _bci(bci)
  {
    init_class_id(Class_CallJava);
  }

  virtual int Opcode() const;
  ciMethod* method() const                 { return _method; }
  void  set_method(ciMethod *m)            { _method = m; }
  void  set_optimized_virtual(bool f)      { _optimized_virtual = f; }
  bool  is_optimized_virtual() const       { return _optimized_virtual; }
  void  set_method_handle_invoke(bool f)   { _method_handle_invoke = f; }
  bool  is_method_handle_invoke() const    { return _method_handle_invoke; }
  void  set_override_symbolic_info(bool f) { _override_symbolic_info = f; }
  bool  override_symbolic_info() const     { return _override_symbolic_info; }

  void copy_call_debug_info(PhaseIterGVN* phase, CallNode *oldcall);

  DEBUG_ONLY( bool validate_symbolic_info() const; )

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool remove_useless_allocation(PhaseGVN *phase, Node* ctl, Node* mem, Node* unc_arg);

public:
  CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallJavaNode(tf, addr, method, bci) {
    init_class_id(Class_CallStaticJava);
    if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
    const TypeTuple *r = tf->range_sig();
    if (ValueTypeReturnedAsFields &&
        method != NULL &&
        method->is_method_handle_intrinsic() &&
        r->cnt() > TypeFunc::Parms &&
        r->field_at(TypeFunc::Parms)->isa_oopptr() &&
        r->field_at(TypeFunc::Parms)->is_oopptr()->can_be_value_type()) {
      // Make sure this call is processed by PhaseMacroExpand::expand_mh_intrinsic_return
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }

    _is_scalar_replaceable = false;
    _is_non_escaping = false;
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
                     const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL, bci) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
    _name = name;
  }

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);

  bool is_boxing_method() const {
    return is_macro() && (method() != NULL) && method()->is_boxing_method();
  }
  // Later inlining modifies the JVMState, so we need to clone it
  // when the call node is cloned (because it is a macro node).
  virtual void clone_jvms(Compile* C) {
    if ((jvms() != NULL) && is_boxing_method()) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};
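
// Illustrative check (not from the original header): uncommon-trap calls are
// CallStaticJavaNodes targeting the uncommon-trap blob, and can be recognized
// and decoded roughly like
//
//   int req = call->as_CallStaticJava()->uncommon_trap_request();
//   if (req != 0) {
//     Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(req);
//     // 'reason' records why the compiled code would deoptimize here.
//   }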

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode( const TypeFunc *tf, address addr, ciMethod* method, int vtable_index, int bci )
    : CallJavaNode(tf, addr, method, bci), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  int _vtable_index;
  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type)
    : CallNode(tf, addr, adr_type)
  {
    init_class_id(Class_CallRuntime);
    _name = name;
  }

  virtual int Opcode() const;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int Opcode() const;
  virtual bool guaranteed_safepoint() { return false; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
  }
  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};
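
// Summary note (illustrative, not from the original header): a
// CallRuntimeNode is still a safepoint (guaranteed_safepoint() is inherited
// as true), so debug info must be kept alive across it, while a CallLeafNode
// never safepoints and needs no JVM state. Typical leaf targets are small
// helpers, e.g. arraycopy stubs, that neither block nor allocate.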

//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
// AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
// get expanded into a code sequence containing a call. Unlike other CallNodes,
// they have 2 memory projections and 2 i_o projections (which are distinguished by
// the _is_io_use flag in the projection.) This is needed when expanding the node in
// order to differentiate the uses of the projection on the normal control path from
// those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress = TypeFunc::Parms, // the newly-allocated raw address
    // Inputs:
    AllocSize = TypeFunc::Parms,  // size (in bytes) of the new object
    KlassNode,                    // type (maybe dynamic) of the obj.
    InitialTest,                  // slow-path test (may be constant)
    ALength,                      // array length (or TOP if none)
    ValueNode,
    DefaultValue,                 // default value in case of non-flattened value array
    RawDefaultValue,              // same as above but as raw machine word
    StorageProperties,            // storage properties for arrays
    ParmLimit
  };

  static const TypeFunc* alloc_type(const Type* t) {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]         = TypeInt::POS;
    fields[KlassNode]         = TypeInstPtr::NOTNULL;
    fields[InitialTest]       = TypeInt::BOOL;
    fields[ALength]           = t;  // length (can be a bad length)
    fields[ValueNode]         = Type::BOTTOM;
    fields[DefaultValue]      = TypeInstPtr::NOTNULL;
    fields[RawDefaultValue]   = TypeX_X;
    fields[StorageProperties] = TypeX_X;

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;
  // True when MemBar for new is redundant with MemBar at initializer exit
  bool _is_allocation_MemBar_redundant;
  bool _larval;

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test,
               ValueTypeBaseNode* value_node = NULL);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool guaranteed_safepoint() { return false; }

  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);

  // allocations do not modify their arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false; }

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note: This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note: AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(KlassNode);
  }
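
  // Usage sketch (illustrative only): pattern-matching an allocation from a
  // pointer during optimization, with the offset-stripping variant:
  //
  //   intptr_t offset;
  //   AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr, phase, offset);
  //   if (alloc != NULL) {
  //     // 'ptr' points 'offset' bytes into the object created by 'alloc'.
  //     Node* klass = alloc->in(AllocateNode::KlassNode);
  //   }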

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);

  // Return true if the allocation doesn't escape the thread, i.e. its escape
  // state is NoEscape or ArgEscape. InitializeNode._does_not_escape is true
  // when the allocation's escape state is NoEscape or ArgEscape. If the
  // allocation's InitializeNode is NULL, check the
  // AllocateNode._is_non_escaping flag instead, which is true only when the
  // escape state is NoEscape.
  bool does_not_escape_thread() {
    InitializeNode* init = NULL;
    return _is_non_escaping || (((init = initialization()) != NULL) && init->does_not_escape());
  }

  // If the object doesn't escape its <init> method and a memory barrier is
  // inserted at the exit of that <init>, then the memory barrier for the
  // allocation is not necessary. Invoke this method when the MemBar at the
  // exit of the initializer post-dominates the allocation node.
  void compute_MemBar_redundancy(ciMethod* initializer);
  bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }

  Node* make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem);
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                    Node* size, Node* klass_node, Node* initial_test,
                    Node* count_val, Node* default_value, Node* raw_default_value, Node* storage_properties)
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node, initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength, count_val);
    init_req(AllocateNode::DefaultValue, default_value);
    init_req(AllocateNode::RawDefaultValue, raw_default_value);
    init_req(AllocateNode::StorageProperties, storage_properties);
  }
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary.
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }
};
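
// Usage sketch (illustrative only): recovering and narrowing an array length
// from an allocation site, where 'ary_type' is the array's oop type:
//
//   AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr, phase);
//   if (alloc != NULL) {
//     Node* length = alloc->make_ideal_length(ary_type, phase);
//   }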

//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  enum {
    Regular = 0,  // Normal lock
    NonEscObj,    // Lock is used for a non-escaping object
    Coarsened,    // Lock was coarsened
    Nested        // Nested lock
  } _kind;
#ifndef PRODUCT
  NamedCounter* _counter;
  static const char* _kind_names[Nested+1];
#endif

protected:
  // helper functions for lock elimination
  //

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

  // Update the counter to indicate that this lock was eliminated.
  void set_eliminated_lock_counter() PRODUCT_RETURN;

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
      _kind(Regular)
  {
#ifndef PRODUCT
    _counter = NULL;
#endif
  }
  virtual int Opcode() const = 0;
  Node *obj_node() const      { return in(TypeFunc::Parms + 0); }
  Node *box_node() const      { return in(TypeFunc::Parms + 1); }
  Node *fastlock_node() const { return in(TypeFunc::Parms + 2); }
  void  set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC; }

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()  const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened()   const { return (_kind == Coarsened); }
  bool is_nested()      const { return (_kind == Nested); }

  const char * kind_as_string() const;
  void log_lock_optimization(Compile* c, const char * tag) const;

  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested;    set_eliminated_lock_counter(); }

  // locking does not modify its arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false; }

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
  virtual void dump_spec(outputStream* st) const;
  virtual void dump_compact_spec(outputStream* st) const;
  virtual void related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const;
#endif
};
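
// Illustrative note (not from the original header): _kind starts out Regular
// and is promoted when the optimizer proves a lock removable, e.g.
//
//   alock->set_non_esc_obj();  // escape analysis: object never escapes
//   alock->set_coarsened();    // lock coarsening merged adjacent regions
//   alock->set_nested();       // nested lock on the same non-escaping object
//
// after which is_eliminated() returns true and macro expansion can drop the
// runtime lock operation.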

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call. This node takes 3 "parameters":
//    0 - object to lock
//    1 - a BoxLockNode
//    2 - a FastLockNode
//
class LockNode : public AbstractLockNode {
public:

  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;   // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;        // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

    return TypeFunc::make(domain, range);
  }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool guaranteed_safepoint() { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  bool is_nested_lock_region();            // Is this Lock nested?
  bool is_nested_lock_region(Compile * c); // Why isn't this Lock nested?
};

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
private:
#ifdef ASSERT
  JVMState* const _dbg_jvms; // Pointer to list of JVM State objects
#endif
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf )
#ifdef ASSERT
    , _dbg_jvms(NULL)
#endif
  {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool guaranteed_safepoint() { return false; }
#ifdef ASSERT
  void set_dbg_jvms(JVMState* s) {
    *(JVMState**)&_dbg_jvms = s; // override const attribute in the accessor
  }
  JVMState* dbg_jvms() const { return _dbg_jvms; }
#else
  JVMState* dbg_jvms() const { return NULL; }
#endif
};

#endif // SHARE_OPTO_CALLNODE_HPP