/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_CALLNODE_HPP
#define SHARE_VM_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/replacednodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class Chaitin;
class NamedCounter;
class MultiNode;
class SafePointNode;
class CallNode;
class CallJavaNode;
class CallStaticJavaNode;
class CallDynamicJavaNode;
class CallRuntimeNode;
class CallLeafNode;
class CallLeafNoFPNode;
class AllocateNode;
class AllocateArrayNode;
class BoxLockNode;
class LockNode;
class UnlockNode;
class JVMState;
class OopMap;
class State;
class StartNode;
class MachCallNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; };
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
  virtual void  dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int   Opcode() const;
  static  const TypeTuple *osr_domain();
};

//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
  virtual void related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const;
#endif
};

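// A minimal sketch (not part of this header) of how the parser typically
// materializes incoming values as ParmNode projections of the StartNode;
// `start` and `gvn` are assumed to come from the surrounding compile:
//
//   StartNode* start = ...;
//   // Parameter i lives at tuple slot TypeFunc::Parms + i:
//   Node* parm0 = gvn.transform(new ParmNode(start, TypeFunc::Parms + 0));
//   // Control is a projection of the same Start tuple (see is_CFG above):
//   Node* ctrl  = gvn.transform(new ParmNode(start, TypeFunc::Control));
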
//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};

//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site. Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode. Restores registers and
// unwinds stack. Rethrow happens in the caller's method.
class RethrowNode : public Node {
 public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};

//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit. (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
class JVMState : public ResourceObj {
  friend class VMStructs;
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false -- do not reexecute
    Reexecute_True      =  1  // true -- reexecute the bytecode
  } ReexecuteState; // Reexecute State

private:
  JVMState*         _caller;    // List pointer for forming scope chains
  uint              _depth;     // One more than caller depth, or one.
  uint              _locoff;    // Offset to locals in input edge mapping
  uint              _stkoff;    // Offset to stack in input edge mapping
  uint              _monoff;    // Offset to monitors in input edge mapping
  uint              _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint              _endoff;    // Offset to end of input edge mapping
  uint              _sp;        // Java Expression Stack Pointer for this state
  int               _bci;       // Byte Code Index of this JVM point
  ReexecuteState    _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*         _method;    // Method Pointer
  SafePointNode*    _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size); // root state; has a null method

  // Access functions for the JVM
  // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
  //       \ locoff    \ stkoff    \ argoff    \ monoff    \ scloff    \ endoff
  uint       locoff() const { return _locoff; }
  uint       stkoff() const { return _stkoff; }
  uint       argoff() const { return _stkoff + _sp; }
  uint       monoff() const { return _monoff; }
  uint       scloff() const { return _scloff; }
  uint       endoff() const { return _endoff; }
  uint       oopoff() const { return debug_end(); }

  int        loc_size() const { return stkoff() - locoff(); }
  int        stk_size() const { return monoff() - stkoff(); }
  int        mon_size() const { return scloff() - monoff(); }
  int        scl_size() const { return endoff() - scloff(); }

  bool       is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
  bool       is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
  bool       is_mon(uint i) const { return monoff() <= i && i < scloff(); }
  bool       is_scl(uint i) const { return scloff() <= i && i < endoff(); }

  uint       sp()  const { return _sp; }
  int        bci() const { return _bci; }
  bool       should_reexecute()       const { return _reexecute == Reexecute_True; }
  bool       is_reexecute_undefined() const { return _reexecute == Reexecute_Undefined; }
  bool       has_method() const { return _method != NULL; }
  ciMethod*  method()     const { assert(has_method(), ""); return _method; }
  JVMState*  caller()     const { return _caller; }
  SafePointNode* map()    const { return _map; }
  uint       depth()      const { return _depth; }
  uint       debug_start() const; // returns locoff of root caller
  uint       debug_end()   const; // returns endoff of self
  uint       debug_size()  const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint       debug_depth() const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;
  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                                  || (caller() && caller()->is_monitor_use(off)); }

  // Initialization functions for the JVM
  void set_locoff(uint off) { _locoff = off; }
  void set_stkoff(uint off) { _stkoff = off; }
  void set_monoff(uint off) { _monoff = off; }
  void set_scloff(uint off) { _scloff = off; }
  void set_endoff(uint off) { _endoff = off; }
  void set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void set_map(SafePointNode *map) { _map = map; }
  void set_sp(uint sp) { _sp = sp; }
  // _reexecute is initialized to "undefined" for a new bci
  void set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
  void set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
  void      set_map_deep(SafePointNode *map); // reset map for all callers
  void      adapt_position(int delta);       // Adapt offsets in in-array after adding an edge.
  int       interpreter_frame_size() const;

#ifndef PRODUCT
  void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void      dump_spec(outputStream *st) const;
  void      dump_on(outputStream* st) const;
  void      dump() const {
    dump_on(tty);
  }
#endif
};
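// A worked example (illustrative only) of the input-edge layout documented
// above. Assume a single, non-inlined state with locoff() == 5 (the usual
// TypeFunc::Parms), two locals, expression-stack height sp() == 1, and one
// held monitor:
//
//   locoff() == 5                      // locals occupy edges 5..6
//   stkoff() == 7                      // expression stack starts at edge 7
//   argoff() == stkoff() + sp() == 8   // outgoing args begin above the stack
//   monoff() == 8                      // monitor (box, obj) pair at edges 8..9
//   monitor_box_offset(0) == 8         // the BoxLockNode edge
//   monitor_obj_offset(0) == 9         // the locked-object edge
//   scloff() == endoff() == 10         // no scalar-replaced objects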
//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _jvms(jvms),
      _oop_map(NULL),
      _adr_type(adr_type)
  {
    init_class_id(Class_SafePoint);
  }

  OopMap*         _oop_map;        // Array of OopMap info (8-bit char) for GC
  JVMState* const _jvms;           // Pointer to list of JVM State objects
  const TypePtr*  _adr_type;       // What type of memory does this node produce?
  ReplacedNodes   _replaced_nodes; // During parsing: list of pairs of nodes from calls to GraphKit::replace_in_map()

  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.

  virtual JVMState* jvms() const { return _jvms; }
  void set_jvms(JVMState* s) {
    *(JVMState**)&_jvms = s;  // override const attribute in the accessor
  }
  OopMap *oop_map() const { return _oop_map; }
  void set_oop_map(OopMap *om) { _oop_map = om; }

 private:
  void verify_input(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    Node* n = in(idx);
    assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
           in(idx + 1)->is_top(), "2nd half of long/double");
  }

 public:
  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->locoff() + idx);
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->stkoff() + idx);
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->argoff() + idx);
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void  set_local(JVMState* jvms, uint idx, Node *c);

  void  set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void  set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0)  grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor ();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;
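  // A short sketch (illustrative only; the real call sites are in GraphKit's
  // locking support) of how the parser maintains the monitor stack: a
  // monitorenter pushes the FastLockNode's (box, obj) pair and the matching
  // monitorexit pops it:
  //
  //   map->push_monitor(flock);             // flock: the FastLockNode
  //   Node* box = map->peek_monitor_box();  // innermost monitor's box
  //   Node* obj = map->peek_monitor_obj();  // innermost locked object
  //   map->pop_monitor();                   // on the matching monitorexit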
  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control  ( Node *c ) { set_req(TypeFunc::Control,c); }
  void set_i_o      ( Node *c ) { set_req(TypeFunc::I_O    ,c); }
  void set_memory   ( Node *c ) { set_req(TypeFunc::Memory ,c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here. (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode* next_exception() const;
  void set_next_exception(SafePointNode* n);
  bool has_exceptions() const { return next_exception() != NULL; }

  // Helper methods to operate on replaced nodes
  ReplacedNodes replaced_nodes() const {
    return _replaced_nodes;
  }

  void set_replaced_nodes(ReplacedNodes replaced_nodes) {
    _replaced_nodes = replaced_nodes;
  }

  void clone_replaced_nodes() {
    _replaced_nodes.clone();
  }
  void record_replaced_node(Node* initial, Node* improved) {
    _replaced_nodes.record(initial, improved);
  }
  void transfer_replaced_nodes_from(SafePointNode* sfpt, uint idx = 0) {
    _replaced_nodes.transfer_from(sfpt->_replaced_nodes, idx);
  }
  void delete_replaced_nodes() {
    _replaced_nodes.reset();
  }
  void apply_replaced_nodes(uint idx) {
    _replaced_nodes.apply(this, idx);
  }
  void merge_replaced_nodes_with(SafePointNode* sfpt) {
    _replaced_nodes.merge_with(sfpt->_replaced_nodes);
  }
  bool has_replaced_nodes() const {
    return !_replaced_nodes.is_empty();
  }

  // Standard Node stuff
  virtual int            Opcode() const;
  virtual bool           pinned() const { return true; }
  virtual const Type*    Value(PhaseGVN* phase) const;
  virtual const Type    *bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr *adr_type() const { return _adr_type; }
  virtual Node          *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node*          Identity(PhaseGVN* phase);
  virtual uint           ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  static  bool           needs_polling_address_input();

#ifndef PRODUCT
  virtual void           dump_spec(outputStream *st) const;
  virtual void           related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const;
#endif
};
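// A minimal sketch (not part of the original header) of reading debug info
// through a SafePointNode; `map` and its JVMState are assumed to come from
// the parser:
//
//   SafePointNode* map  = ...;
//   JVMState*      jvms = map->jvms();
//   Node* receiver = map->local(jvms, 0);               // local slot 0 (e.g. "this")
//   Node* tos      = map->stack(jvms, jvms->sp() - 1);  // top of expression stack
//   Node* box0     = map->monitor_box(jvms, 0);         // first monitor's box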
//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.

class SafePointScalarObjectNode: public TypeNode {
  uint _first_index; // First input edge relative index of a SafePoint node where
                     // states of the scalarized object fields are collected.
                     // It is relative to the last (youngest) jvms->_scloff.
  uint _n_fields;    // Number of non-static fields of the scalarized object.
  DEBUG_ONLY(AllocateNode* _alloc;)

  virtual uint hash() const ; // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const;

  uint first_index() const { return _first_index; }

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                            AllocateNode* alloc,
#endif
                            uint first_index, uint n_fields);
  virtual int Opcode() const;
  virtual uint           ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  uint first_index(JVMState* jvms) const {
    assert(jvms != NULL, "missed JVMS");
    return jvms->scloff() + _first_index;
  }
  uint n_fields() const { return _n_fields; }

#ifdef ASSERT
  AllocateNode* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s".  But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call".  Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(Dict* sosn_map) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


// Simple container for the outgoing projections of a call.  Useful
// for serious surgery on calls.
class CallProjections {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* exobj;
  uint  nb_resproj;
  Node* resproj[1]; // at least one projection

  CallProjections(uint nbres) {
    fallthrough_proj      = NULL;
    fallthrough_catchproj = NULL;
    fallthrough_memproj   = NULL;
    fallthrough_ioproj    = NULL;
    catchall_catchproj    = NULL;
    catchall_memproj      = NULL;
    catchall_ioproj       = NULL;
    exobj                 = NULL;
    nb_resproj            = nbres;
    resproj[0]            = NULL;
    for (uint i = 1; i < nb_resproj; i++) {
      resproj[i] = NULL;
    }
  }

};

class CallGenerator;
//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
  friend class VMStructs;

protected:
  bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr *t_oop, PhaseTransform *phase);

public:
  const TypeFunc *_tf;          // Function type
  address         _entry_point; // Address of method being called
  float           _cnt;         // Estimate of number of times called
  CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
  const char     *_name;        // Printable name, if _method is NULL

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
    : SafePointNode(tf->domain_cc()->cnt(), NULL, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(NULL),
      _name(NULL)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()         const { return _tf; }
  const address  entry_point() const { return _entry_point; }
  const float    cnt()         const { return _cnt; }
  CallGenerator* generator()   const { return _generator; }

  void set_tf(const TypeFunc* tf)       { _tf = tf; }
  void set_entry_point(address p)       { _entry_point = p; }
  void set_cnt(float c)                 { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type *bottom_type() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node  *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node* Identity(PhaseGVN* phase) { return this; }
  virtual uint   cmp( const Node &n ) const;
  virtual uint   size_of() const = 0;
  virtual void   calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
  virtual Node  *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  virtual uint   ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool   guaranteed_safepoint()  { return true; }
  // For macro nodes, the JVMState gets modified during expansion. If calls
  // use MachConstantBase, it gets modified during matching. So when cloning
  // the node the JVMState must be cloned. Default is not to clone.
  virtual void clone_jvms(Compile* C) {
    if (C->needs_clone_jvms() && jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  // Returns true if the call may modify n
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase);
  // Does this node have a use of n other than in debug information?
  bool has_non_debug_use(Node *n);
  bool has_debug_use(Node *n);
  // Returns the unique CheckCastPP of a call,
  // or the result projection if there are several CheckCastPPs,
  // or NULL if there is none.
  Node *result_cast();
  // Does this node return a pointer?
  bool returns_pointer() const {
    const TypeTuple *r = tf()->range_sig();
    return (!tf()->returns_value_type_as_fields() &&
            r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else.  Used by macro expansion
  // and the late inlining support.
  CallProjections* extract_projections(bool separate_io_proj, bool do_asserts = true);

  virtual uint match_edge(uint idx) const;

  bool is_call_to_arraycopystub() const;

#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
  virtual void dump_spec(outputStream *st) const;
#endif
};
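// A minimal sketch (illustrative, in the spirit of the macro-expansion code,
// which is assumed here) of detaching a call's users via extract_projections:
//
//   CallNode* call = ...;
//   CallProjections* projs = call->extract_projections(true /*separate_io_proj*/);
//   if (projs->fallthrough_catchproj != NULL) {
//     // rewire normal control flow around the call
//   }
//   if (projs->nb_resproj > 0 && projs->resproj[0] != NULL) {
//     // rewire uses of the call's result
//   }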
//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention. (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
  friend class VMStructs;
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool      _optimized_virtual;
  bool      _method_handle_invoke;
  bool      _override_symbolic_info; // Override symbolic call site info from bytecode
  ciMethod* _method;                 // Method being direct called
public:
  const int _bci;                    // Byte Code Index of call byte code
  CallJavaNode(const TypeFunc* tf , address addr, ciMethod* method, int bci)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _method(method), _bci(bci),
      _optimized_virtual(false),
      _method_handle_invoke(false),
      _override_symbolic_info(false)
  {
    init_class_id(Class_CallJava);
  }

  virtual int Opcode() const;
  ciMethod* method() const                 { return _method; }
  void  set_method(ciMethod *m)            { _method = m; }
  void  set_optimized_virtual(bool f)      { _optimized_virtual = f; }
  bool  is_optimized_virtual() const       { return _optimized_virtual; }
  void  set_method_handle_invoke(bool f)   { _method_handle_invoke = f; }
  bool  is_method_handle_invoke() const    { return _method_handle_invoke; }
  void  set_override_symbolic_info(bool f) { _override_symbolic_info = f; }
  bool  override_symbolic_info() const     { return _override_symbolic_info; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallJavaNode(tf, addr, method, bci) {
    init_class_id(Class_CallStaticJava);
    if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
    const TypeTuple *r = tf->range_sig();
    if (ValueTypeReturnedAsFields &&
        method != NULL &&
        method->is_method_handle_intrinsic() &&
        r->cnt() > TypeFunc::Parms &&
        r->field_at(TypeFunc::Parms)->isa_valuetypeptr() &&
        r->field_at(TypeFunc::Parms)->is_valuetypeptr()->is__Value()) {
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }

    _is_scalar_replaceable = false;
    _is_non_escaping = false;
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
                     const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL, bci) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
    _name = name;
  }

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);

  bool is_boxing_method() const {
    return is_macro() && (method() != NULL) && method()->is_boxing_method();
  }
  // Later inlining modifies the JVMState, so we need to clone it
  // when the call node is cloned (because it is a macro node).
  virtual void clone_jvms(Compile* C) {
    if ((jvms() != NULL) && is_boxing_method()) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};
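// An illustrative sketch (not part of the original header) of inspecting an
// uncommon trap; Deoptimization::trap_request_reason is assumed to be
// available from deoptimization.hpp:
//
//   CallStaticJavaNode* call = ...;
//   int req = call->uncommon_trap_request();
//   if (req != 0) {
//     Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(req);
//     // e.g. decide whether this trap reason blocks an optimization
//   }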
//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode( const TypeFunc *tf , address addr, ciMethod* method, int vtable_index, int bci ) : CallJavaNode(tf,addr,method,bci), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  int _vtable_index;
  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type)
    : CallNode(tf, addr, adr_type)
  {
    init_class_id(Class_CallRuntime);
    _name = name;
  }

  virtual int Opcode() const;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int Opcode() const;
  virtual bool guaranteed_safepoint() { return false; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
  }
  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};
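// A minimal sketch (illustrative only) of the TypeFunc a CallLeafNode might
// use, built the same way as LockNode::lock_type() below; the particular
// signature (oop, int) -> void is a made-up example:
//
//   static const TypeFunc* leaf_call_type() {
//     // create input type (domain)
//     const Type** fields = TypeTuple::fields(2);
//     fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // object argument
//     fields[TypeFunc::Parms+1] = TypeInt::INT;         // int argument
//     const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+2, fields);
//
//     // create result type (range): void
//     fields = TypeTuple::fields(0);
//     const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+0, fields);
//
//     return TypeFunc::make(domain, range);
//   }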
//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
//  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
//  get expanded into a code sequence containing a call.  Unlike other CallNodes,
//  they have 2 memory projections and 2 i_o projections (which are distinguished by
//  the _is_io_use flag in the projection.)  This is needed when expanding the node in
//  order to differentiate the uses of the projection on the normal control path from
//  those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress = TypeFunc::Parms, // the newly-allocated raw address
    // Inputs:
    AllocSize = TypeFunc::Parms,  // size (in bytes) of the new object
    KlassNode,                    // type (maybe dynamic) of the obj.
    InitialTest,                  // slow-path test (may be constant)
    ALength,                      // array length (or TOP if none)
    ValueNode,
    ParmLimit
  };

  static const TypeFunc* alloc_type(const Type* t) {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]   = TypeInt::POS;
    fields[KlassNode]   = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength]     = t;  // length (can be a bad length)
    fields[ValueNode]   = Type::BOTTOM;

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;
  // True when MemBar for new is redundant with MemBar at initializer exit
  bool _is_allocation_MemBar_redundant;

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test, ValueTypeBaseNode* value_node = NULL);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool guaranteed_safepoint() { return false; }

  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);

  // allocations do not modify their arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false;}

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note:  This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note:  AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(KlassNode);
  }
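  // A short sketch (illustrative only) of using the pattern-matchers above;
  // `ptr` and `phase` are assumed to come from the calling optimization:
  //
  //   AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr, phase);
  //   if (alloc != NULL) {
  //     Node* klass = alloc->in(AllocateNode::KlassNode); // same as Ideal_klass(ptr, phase)
  //     Node* size  = alloc->in(AllocateNode::AllocSize); // size in bytes
  //   }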
  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note:  Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);

  // Return true if the allocation doesn't escape the thread, i.e. its escape
  // state is NoEscape or ArgEscape.  InitializeNode._does_not_escape is true
  // when the allocation's escape state is NoEscape or ArgEscape.  If the
  // allocation's InitializeNode is NULL, check the
  // AllocateNode._is_non_escaping flag instead, which is true when the
  // escape state is NoEscape.
  bool does_not_escape_thread() {
    InitializeNode* init = NULL;
    return _is_non_escaping || (((init = initialization()) != NULL) && init->does_not_escape());
  }

  // If the object doesn't escape in its <init> method and there is a memory
  // barrier inserted at the exit of its <init>, the memory barrier for the
  // allocation is not necessary.  Invoke this method when the MemBar at the
  // exit of the initializer post-dominates the allocation node.
  void compute_MemBar_redundancy(ciMethod* initializer);
  bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                    Node* size, Node* klass_node, Node* initial_test,
                    Node* count_val
                    )
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength, count_val);
  }
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }
};
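// A short sketch (illustrative only) of recovering an array length from an
// allocation site; `ptr` and `phase` are assumed from the caller:
//
//   AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr, phase);
//   if (alloc != NULL) {
//     Node* len = alloc->Ideal_length(); // raw ALength input, or TOP if unknown
//     // make_ideal_length() can narrow the length type with a CastII if necessary
//   }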
//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  enum {
    Regular = 0,  // Normal lock
    NonEscObj,    // Lock is used for non-escaping object
    Coarsened,    // Lock was coarsened
    Nested        // Nested lock
  } _kind;
#ifndef PRODUCT
  NamedCounter* _counter;
  static const char* _kind_names[Nested+1];
#endif

protected:
  // helper functions for lock elimination
  //

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

  // Update the counter to indicate that this lock was eliminated.
  void set_eliminated_lock_counter() PRODUCT_RETURN;

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
      _kind(Regular)
  {
#ifndef PRODUCT
    _counter = NULL;
#endif
  }
  virtual int Opcode() const = 0;
  Node *obj_node() const       { return in(TypeFunc::Parms + 0); }
  Node *box_node() const       { return in(TypeFunc::Parms + 1); }
  Node *fastlock_node() const  { return in(TypeFunc::Parms + 2); }
  void  set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC;}

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()  const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened()   const { return (_kind == Coarsened); }
  bool is_nested()      const { return (_kind == Nested); }

  const char * kind_as_string() const;
  void log_lock_optimization(Compile* c, const char * tag) const;

  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested; set_eliminated_lock_counter(); }

  // locking does not modify its arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase){ return false;}

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
  virtual void dump_spec(outputStream* st) const;
  virtual void dump_compact_spec(outputStream* st) const;
  virtual void related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const;
#endif
};
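// An illustrative sketch (not part of the original header) of reading a
// lock's three parameters, matching the layout documented for LockNode
// below; `lock` is assumed to come from the macro-expansion code:
//
//   AbstractLockNode* lock = ...;
//   Node* obj   = lock->obj_node();      // Parms + 0: object being locked
//   Node* box   = lock->box_node();      // Parms + 1: BoxLockNode (stack slot)
//   Node* flock = lock->fastlock_node(); // Parms + 2: FastLockNode result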
//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call.  This node takes 3 "parameters":
//    0  -  object to lock
//    1 -   a BoxLockNode
//    2 -   a FastLockNode
//
class LockNode : public AbstractLockNode {
public:

  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;         // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);

    return TypeFunc::make(domain, range);
  }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool guaranteed_safepoint() { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  bool is_nested_lock_region();            // Is this Lock nested?
  bool is_nested_lock_region(Compile * c); // Why isn't this Lock nested?
};

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
private:
#ifdef ASSERT
  JVMState* const _dbg_jvms; // Pointer to list of JVM State objects
#endif
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf )
#ifdef ASSERT
    , _dbg_jvms(NULL)
#endif
  {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool guaranteed_safepoint() { return false; }
#ifdef ASSERT
  void set_dbg_jvms(JVMState* s) {
    *(JVMState**)&_dbg_jvms = s;  // override const attribute in the accessor
  }
  JVMState* dbg_jvms() const { return _dbg_jvms; }
#else
  JVMState* dbg_jvms() const { return NULL; }
#endif
};
#endif // SHARE_VM_OPTO_CALLNODE_HPP