/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_CALLNODE_HPP
#define SHARE_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/replacednodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class Chaitin;
class NamedCounter;
class MultiNode;
class SafePointNode;
class CallNode;
class CallJavaNode;
class CallStaticJavaNode;
class CallDynamicJavaNode;
class CallRuntimeNode;
class CallLeafNode;
class CallLeafNoFPNode;
class AllocateNode;
class AllocateArrayNode;
class BoxLockNode;
class LockNode;
class UnlockNode;
class JVMState;
class OopMap;
class State;
class StartNode;
class MachCallNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; }
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
  virtual void  dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int   Opcode() const;
  static  const TypeTuple *osr_domain();
};

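// Incoming arguments surface in the graph as ParmNode projections hanging off
// the StartNode (see ParmNode below). A minimal sketch of how a parser-like
// client might materialize the i-th incoming argument; the variable names are
// illustrative, not actual parser code:
//
//   StartNode* start = C->start();
//   Node* parm_i = gvn.transform(new ParmNode(start, TypeFunc::Parms + i));
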
//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
  virtual void related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const;
#endif
};


//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};

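// Note: a ReturnNode consumes control, I/O, memory, frame pointer and return
// address in the standard TypeFunc slots; for non-void methods the returned
// value is the additional input edge at TypeFunc::Parms.
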
//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site. Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode. Restores registers and
// unwinds stack. Rethrow happens in the caller's method.
class RethrowNode : public Node {
 public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit. (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
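//
// For example, if a() inlines b(), which inlines c(), then at a call site in
// c() the youngest JVMState describes c()'s locals, stack and monitors, its
// caller() link points to b()'s state, and b()'s caller() points to a()'s;
// the root state has caller() == NULL and depth() == 1.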
class JVMState : public ResourceObj {
  friend class VMStructs;
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false -- do not reexecute
    Reexecute_True      =  1  // true -- reexecute the bytecode
  } ReexecuteState; // Reexecute State

private:
  JVMState*         _caller;    // List pointer for forming scope chains
  uint              _depth;     // One more than caller depth, or one.
  uint              _locoff;    // Offset to locals in input edge mapping
  uint              _stkoff;    // Offset to stack in input edge mapping
  uint              _monoff;    // Offset to monitors in input edge mapping
  uint              _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint              _endoff;    // Offset to end of input edge mapping
  uint              _sp;        // Java Expression Stack Pointer for this state
  int               _bci;       // Byte Code Index of this JVM point
  ReexecuteState    _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*         _method;    // Method Pointer
  SafePointNode*    _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size); // root state; has a null method

  // Access functions for the JVM
  // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
  //       \ locoff    \ stkoff    \ argoff    \ monoff    \ scloff    \ endoff
  uint              locoff() const { return _locoff; }
  uint              stkoff() const { return _stkoff; }
  uint              argoff() const { return _stkoff + _sp; }
  uint              monoff() const { return _monoff; }
  uint              scloff() const { return _scloff; }
  uint              endoff() const { return _endoff; }
  uint              oopoff() const { return debug_end(); }

  int            loc_size() const { return stkoff() - locoff(); }
  int            stk_size() const { return monoff() - stkoff(); }
  int            mon_size() const { return scloff() - monoff(); }
  int            scl_size() const { return endoff() - scloff(); }

  bool        is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
  bool        is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
  bool        is_mon(uint i) const { return monoff() <= i && i < scloff(); }
  bool        is_scl(uint i) const { return scloff() <= i && i < endoff(); }
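
  // Worked example (hypothetical map): a scope with 3 locals and sp() == 2
  // has stkoff() == locoff() + 3 and argoff() == stkoff() + sp(), so the two
  // live stack slots sit at input edges stkoff() and stkoff() + 1 of the map.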

  uint                      sp() const { return _sp; }
  int                      bci() const { return _bci; }
  bool        should_reexecute() const { return _reexecute==Reexecute_True; }
  bool  is_reexecute_undefined() const { return _reexecute==Reexecute_Undefined; }
  bool              has_method() const { return _method != NULL; }
  ciMethod*             method() const { assert(has_method(), ""); return _method; }
  JVMState*             caller() const { return _caller; }
  SafePointNode*           map() const { return _map; }
  uint                   depth() const { return _depth; }
  uint             debug_start() const; // returns locoff of root caller
  uint               debug_end() const; // returns endoff of self
  uint              debug_size() const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint             debug_depth() const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                             || (caller() && caller()->is_monitor_use(off)); }

  // Initialization functions for the JVM
  void              set_locoff(uint off) { _locoff = off; }
  void              set_stkoff(uint off) { _stkoff = off; }
  void              set_monoff(uint off) { _monoff = off; }
  void              set_scloff(uint off) { _scloff = off; }
  void              set_endoff(uint off) { _endoff = off; }
  void              set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void              set_map(SafePointNode *map) { _map = map; }
  void              set_sp(uint sp) { _sp = sp; }
                    // _reexecute is initialized to "undefined" for a new bci
  void              set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
  void              set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
  void      set_map_deep(SafePointNode *map);// reset map for all callers
  void      adapt_position(int delta);       // Adapt offsets in in-array after adding an edge.
  int       interpreter_frame_size() const;

#ifndef PRODUCT
  void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void      dump_spec(outputStream *st) const;
  void      dump_on(outputStream* st) const;
  void      dump() const {
    dump_on(tty);
  }
#endif
};
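
// A minimal sketch of walking an inlining chain from the youngest state to
// the root (illustrative only; assumes 'jvms' is non-NULL):
//
//   for (JVMState* s = jvms; s != NULL; s = s->caller()) {
//     tty->print_cr("depth %u bci %d", s->depth(), s->bci());
//   }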

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  virtual bool           cmp( const Node &n ) const;
  virtual uint           size_of() const; // Size is bigger

public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _oop_map(NULL),
      _jvms(jvms),
      _adr_type(adr_type)
  {
    init_class_id(Class_SafePoint);
  }

  OopMap*         _oop_map;   // Array of OopMap info (8-bit char) for GC
  JVMState* const _jvms;      // Pointer to list of JVM State objects
  const TypePtr*  _adr_type;  // What type of memory does this node produce?
  ReplacedNodes   _replaced_nodes; // During parsing: list of pairs of nodes from calls to GraphKit::replace_in_map()

  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.

  virtual JVMState* jvms() const { return _jvms; }
  void set_jvms(JVMState* s) {
    *(JVMState**)&_jvms = s;  // override const attribute in the accessor
  }
  OopMap *oop_map() const { return _oop_map; }
  void set_oop_map(OopMap *om) { _oop_map = om; }

 private:
  void verify_input(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    Node* n = in(idx);
    assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
           in(idx + 1)->is_top(), "2nd half of long/double");
  }

 public:
  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->locoff() + idx);
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->stkoff() + idx);
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->argoff() + idx);
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void  set_local(JVMState* jvms, uint idx, Node *c);

  void  set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void  set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0)  grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor ();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control  ( Node *c ) { set_req(TypeFunc::Control,c); }
  void set_i_o      ( Node *c ) { set_req(TypeFunc::I_O    ,c); }
  void set_memory   ( Node *c ) { set_req(TypeFunc::Memory ,c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }
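
  // Example (illustrative): reading local slot 0 of the youngest scope of a
  // map via the JVMState offsets defined above ('kit' is a hypothetical
  // GraphKit):
  //
  //   SafePointNode* map = kit.map();
  //   Node* loc0 = map->local(map->jvms(), 0);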

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here. (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode*         next_exception() const;
  void               set_next_exception(SafePointNode* n);
  bool                   has_exceptions() const { return next_exception() != NULL; }

  // Helper methods to operate on replaced nodes
  ReplacedNodes replaced_nodes() const {
    return _replaced_nodes;
  }

  void set_replaced_nodes(ReplacedNodes replaced_nodes) {
    _replaced_nodes = replaced_nodes;
  }

  void clone_replaced_nodes() {
    _replaced_nodes.clone();
  }
  void record_replaced_node(Node* initial, Node* improved) {
    _replaced_nodes.record(initial, improved);
  }
  void transfer_replaced_nodes_from(SafePointNode* sfpt, uint idx = 0) {
    _replaced_nodes.transfer_from(sfpt->_replaced_nodes, idx);
  }
  void delete_replaced_nodes() {
    _replaced_nodes.reset();
  }
  void apply_replaced_nodes(uint idx) {
    _replaced_nodes.apply(this, idx);
  }
  void merge_replaced_nodes_with(SafePointNode* sfpt) {
    _replaced_nodes.merge_with(sfpt->_replaced_nodes);
  }
  bool has_replaced_nodes() const {
    return !_replaced_nodes.is_empty();
  }

  void disconnect_from_root(PhaseIterGVN *igvn);

  // Standard Node stuff
  virtual int            Opcode() const;
  virtual bool           pinned() const { return true; }
  virtual const Type*    Value(PhaseGVN* phase) const;
  virtual const Type    *bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr *adr_type() const { return _adr_type; }
  virtual Node          *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node*          Identity(PhaseGVN* phase);
  virtual uint           ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  static  bool           needs_polling_address_input();

#ifndef PRODUCT
  virtual void           dump_spec(outputStream *st) const;
  virtual void           related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const;
#endif
};
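
// Sketch of the replaced-nodes bookkeeping during parsing (illustrative, not
// a verbatim GraphKit excerpt): when replace_in_map() improves a node, the
// improvement is recorded on the map so callers can replay it later:
//
//   map->record_replaced_node(obj, cast_obj);       // remember obj -> cast_obj
//   ...
//   caller_map->transfer_replaced_nodes_from(map);  // propagate to the caller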

//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.

class SafePointScalarObjectNode: public TypeNode {
  uint _first_index; // First input edge relative index of a SafePoint node where
                     // states of the scalarized object fields are collected.
                     // It is relative to the last (youngest) jvms->_scloff.
  uint _n_fields;    // Number of non-static fields of the scalarized object.
  DEBUG_ONLY(AllocateNode* _alloc;)

  virtual uint hash() const ; // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const;

  uint first_index() const { return _first_index; }

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                            AllocateNode* alloc,
#endif
                            uint first_index, uint n_fields);
  virtual int Opcode() const;
  virtual uint           ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  uint first_index(JVMState* jvms) const {
    assert(jvms != NULL, "missed JVMS");
    return jvms->scloff() + _first_index;
  }
  uint n_fields()    const { return _n_fields; }

#ifdef ASSERT
  AllocateNode* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s".  But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call".  Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(Dict* sosn_map) const;

#ifndef PRODUCT
  virtual void              dump_spec(outputStream *st) const;
#endif
};


// Simple container for the outgoing projections of a call.  Useful
// for serious surgery on calls.
class CallProjections {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* exobj;
  uint nb_resproj;
  Node* resproj[1]; // at least one projection

  CallProjections(uint nbres) {
    fallthrough_proj      = NULL;
    fallthrough_catchproj = NULL;
    fallthrough_memproj   = NULL;
    fallthrough_ioproj    = NULL;
    catchall_catchproj    = NULL;
    catchall_memproj      = NULL;
    catchall_ioproj       = NULL;
    exobj                 = NULL;
    nb_resproj            = nbres;
    resproj[0]            = NULL;
    for (uint i = 1; i < nb_resproj; i++) {
      resproj[i] = NULL;
    }
  }

};

class CallGenerator;
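
// Typical (illustrative) use of CallProjections during macro expansion or
// late inlining: pull the interesting projections off a call before rewiring
// its users (see CallNode::extract_projections() below):
//
//   CallProjections* projs = call->extract_projections(true /*separate_io_proj*/);
//   Node* ctl = projs->fallthrough_catchproj;
//   Node* mem = projs->fallthrough_memproj;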

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
  friend class VMStructs;

protected:
  bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr *t_oop, PhaseTransform *phase);

public:
  const TypeFunc *_tf;        // Function type
  address      _entry_point;  // Address of method being called
  float        _cnt;          // Estimate of number of times called
  CallGenerator* _generator;  // corresponding CallGenerator for some late inline calls
  const char *_name;          // Printable name, if _method is NULL

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
    : SafePointNode(tf->domain_cc()->cnt(), NULL, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(NULL),
      _name(NULL)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()         const { return _tf; }
  const address  entry_point() const { return _entry_point; }
  const float    cnt()         const { return _cnt; }
  CallGenerator* generator()   const { return _generator; }

  void set_tf(const TypeFunc* tf)       { _tf = tf; }
  void set_entry_point(address p)       { _entry_point = p; }
  void set_cnt(float c)                 { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type *bottom_type() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node  *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node* Identity(PhaseGVN* phase) { return this; }
  virtual bool        cmp( const Node &n ) const;
  virtual uint        size_of() const = 0;
  virtual void        calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
  virtual Node       *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  virtual uint        ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool        guaranteed_safepoint()  { return true; }
  // For macro nodes, the JVMState gets modified during expansion. If calls
  // use MachConstantBase, it gets modified during matching. So when cloning
  // the node the JVMState must be cloned. Default is not to clone.
  virtual void clone_jvms(Compile* C) {
    if (C->needs_clone_jvms() && jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  // Returns true if the call may modify n
  virtual bool        may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase);
  // Does this node have a use of n other than in debug information?
  bool                has_non_debug_use(Node *n);
  bool                has_debug_use(Node *n);
  // Returns the unique CheckCastPP of a call,
  // or the result projection if there are several CheckCastPPs,
  // or NULL if there is none.
  Node *result_cast();
  // Does this node return a pointer?
  bool returns_pointer() const {
    const TypeTuple *r = tf()->range_sig();
    return (!tf()->returns_value_type_as_fields() &&
            r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }
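
  // Example (illustrative): a pass that needs the user-visible oop result of
  // a call can ask for its unique cast:
  //
  //   Node* res = call->result_cast();
  //   if (res == NULL) { /* no unique result user */ }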

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else.  Used by macro expansion
  // and the late inlining support.
  CallProjections* extract_projections(bool separate_io_proj, bool do_asserts = true);

  virtual uint match_edge(uint idx) const;

  bool is_call_to_arraycopystub() const;

#ifndef PRODUCT
  virtual void        dump_req(outputStream *st = tty) const;
  virtual void        dump_spec(outputStream *st) const;
#endif
};


//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention.  (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
  friend class VMStructs;
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool    _optimized_virtual;
  bool    _method_handle_invoke;
  bool    _override_symbolic_info; // Override symbolic call site info from bytecode
  ciMethod* _method;               // Method being direct called
public:
  const int       _bci;         // Byte Code Index of call byte code
  CallJavaNode(const TypeFunc* tf , address addr, ciMethod* method, int bci)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _optimized_virtual(false),
      _method_handle_invoke(false),
      _override_symbolic_info(false),
      _method(method), _bci(bci)
  {
    init_class_id(Class_CallJava);
  }

  virtual int   Opcode() const;
  ciMethod* method() const                 { return _method; }
  void  set_method(ciMethod *m)            { _method = m; }
  void  set_optimized_virtual(bool f)      { _optimized_virtual = f; }
  bool  is_optimized_virtual() const       { return _optimized_virtual; }
  void  set_method_handle_invoke(bool f)   { _method_handle_invoke = f; }
  bool  is_method_handle_invoke() const    { return _method_handle_invoke; }
  void  set_override_symbolic_info(bool f) { _override_symbolic_info = f; }
  bool  override_symbolic_info() const     { return _override_symbolic_info; }

  DEBUG_ONLY( bool validate_symbolic_info() const; )

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
  virtual void  dump_compact_spec(outputStream *st) const;
#endif
};
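
// Illustrative: a call generator that linked a call through a MethodHandle
// would flag it so later phases use the alternate calling convention and
// trust the attached ciMethod over the bytecode's symbolic info:
//
//   call->set_method_handle_invoke(true);
//   call->set_override_symbolic_info(true);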

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallJavaNode(tf, addr, method, bci) {
    init_class_id(Class_CallStaticJava);
    if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
    const TypeTuple *r = tf->range_sig();
    if (ValueTypeReturnedAsFields &&
        method != NULL &&
        method->is_method_handle_intrinsic() &&
        r->cnt() > TypeFunc::Parms &&
        r->field_at(TypeFunc::Parms)->isa_oopptr() &&
        r->field_at(TypeFunc::Parms)->is_oopptr()->can_be_value_type()) {
      // Make sure this call is processed by PhaseMacroExpand::expand_mh_intrinsic_return
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }

    _is_scalar_replaceable = false;
    _is_non_escaping = false;
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
                     const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL, bci) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
    _name = name;
  }

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);

  bool is_boxing_method() const {
    return is_macro() && (method() != NULL) && method()->is_boxing_method();
  }
  // Later inlining modifies the JVMState, so we need to clone it
  // when the call node is cloned (because it is a macro node).
  virtual void clone_jvms(Compile* C) {
    if ((jvms() != NULL) && is_boxing_method()) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  virtual int         Opcode() const;
#ifndef PRODUCT
  virtual void        dump_spec(outputStream *st) const;
  virtual void        dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode( const TypeFunc *tf , address addr, ciMethod* method, int vtable_index, int bci )
    : CallJavaNode(tf,addr,method,bci), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  int _vtable_index;
  virtual int   Opcode() const;
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};
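
// Illustrative dispatch over the call hierarchy (hypothetical client code,
// using the standard Node class-id queries):
//
//   if (call->is_CallStaticJava()) {
//     int trap_req = call->as_CallStaticJava()->uncommon_trap_request();
//   } else if (call->is_CallDynamicJava()) {
//     int vtable = call->as_CallDynamicJava()->_vtable_index;
//   }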

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type)
    : CallNode(tf, addr, adr_type)
  {
    init_class_id(Class_CallRuntime);
    _name = name;
  }

  virtual int   Opcode() const;
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int   Opcode() const;
  virtual bool        guaranteed_safepoint()  { return false; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
  }
  virtual int   Opcode() const;
  virtual uint match_edge(uint idx) const;
};
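
// Minimal sketch of wiring up a leaf runtime call (the entry point and
// TypeFunc here are hypothetical; real callers typically go through
// GraphKit::make_runtime_call()):
//
//   CallLeafNode* call =
//     new CallLeafNode(OptoRuntime::some_helper_Type(),         // hypothetical
//                      CAST_FROM_FN_PTR(address, some_helper),  // hypothetical
//                      "some_helper", TypeRawPtr::BOTTOM);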

//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
//  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
//  get expanded into a code sequence containing a call.  Unlike other CallNodes,
//  they have 2 memory projections and 2 i_o projections (which are distinguished by
//  the _is_io_use flag in the projection.)  This is needed when expanding the node in
//  order to differentiate the uses of the projection on the normal control path from
//  those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
    // Inputs:
    AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
    KlassNode,                        // type (maybe dynamic) of the obj.
    InitialTest,                      // slow-path test (may be constant)
    ALength,                          // array length (or TOP if none)
    ValueNode,
    DefaultValue,                     // default value in case of non-flattened value array
    RawDefaultValue,                  // same as above but as raw machine word
    StorageProperties,                // storage properties for arrays
    ParmLimit
  };

  static const TypeFunc* alloc_type(const Type* t) {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]   = TypeInt::POS;
    fields[KlassNode]   = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength]     = t;  // length (can be a bad length)
    fields[ValueNode]   = Type::BOTTOM;
    fields[DefaultValue] = TypeInstPtr::NOTNULL;
    fields[RawDefaultValue] = TypeX_X;
    fields[StorageProperties] = TypeX_X;

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;
  // True when MemBar for new is redundant with MemBar at initializer exit
  bool _is_allocation_MemBar_redundant;
  bool _larval;

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test,
               ValueTypeBaseNode* value_node = NULL);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool        guaranteed_safepoint()  { return false; }

  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);

  // allocations do not modify their arguments
  virtual bool        may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false;}

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note:  This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note:  AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(KlassNode);
  }
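
  // Example (illustrative): recovering the allocation behind an oop with any
  // offset stripped off, as an escape-analysis style pass might do:
  //
  //   intptr_t offset = 0;
  //   AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr, phase, offset);
  //   if (alloc != NULL) { Node* klass = alloc->in(AllocateNode::KlassNode); }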

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note:  Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);

  // Return true if the allocation doesn't escape the thread, i.e. its escape
  // state is NoEscape or ArgEscape.  InitializeNode._does_not_escape is true
  // when the allocation's escape state is NoEscape or ArgEscape.  In case the
  // allocation's InitializeNode is NULL, check AllocateNode._is_non_escaping,
  // which is true when the escape state is NoEscape.
  bool does_not_escape_thread() {
    InitializeNode* init = NULL;
    return _is_non_escaping || (((init = initialization()) != NULL) && init->does_not_escape());
  }

  // If the object doesn't escape in its <init> method and there is a memory
  // barrier inserted at the exit of the <init>, the memory barrier for the
  // allocation is not necessary.  Invoke this method when the MemBar at the
  // exit of the initializer post-dominates the allocation node.
  void compute_MemBar_redundancy(ciMethod* initializer);
  bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }

  Node* make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem, Node* klass_node);
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                    Node* size, Node* klass_node, Node* initial_test,
                    Node* count_val, Node* default_value, Node* raw_default_value, Node* storage_properties)
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node, initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength,             count_val);
    init_req(AllocateNode::DefaultValue,       default_value);
    init_req(AllocateNode::RawDefaultValue,    raw_default_value);
    init_req(AllocateNode::StorageProperties,  storage_properties);
  }
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary.
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }
};

//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  enum {
    Regular = 0,  // Normal lock
    NonEscObj,    // Lock is used for a non-escaping object
    Coarsened,    // Lock was coarsened
    Nested        // Nested lock
  } _kind;
#ifndef PRODUCT
  NamedCounter* _counter;
  static const char* _kind_names[Nested+1];
#endif

protected:
  // helper functions for lock elimination
  //

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

  // Update the counter to indicate that this lock was eliminated.
  void set_eliminated_lock_counter() PRODUCT_RETURN;

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
      _kind(Regular)
  {
#ifndef PRODUCT
    _counter = NULL;
#endif
  }
  virtual int Opcode() const = 0;
  Node *   obj_node() const       {return in(TypeFunc::Parms + 0); }
  Node *   box_node() const       {return in(TypeFunc::Parms + 1); }
  Node *   fastlock_node() const  {return in(TypeFunc::Parms + 2); }
  void     set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC;}

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()  const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened()   const { return (_kind == Coarsened); }
  bool is_nested()      const { return (_kind == Nested); }

  const char * kind_as_string() const;
  void log_lock_optimization(Compile* c, const char * tag) const;

  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested; set_eliminated_lock_counter(); }

  // locking does not modify its arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase){ return false;}

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
  virtual void dump_spec(outputStream* st) const;
  virtual void dump_compact_spec(outputStream* st) const;
  virtual void related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const;
#endif
};
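
// Illustrative: lock-elimination passes tag eliminable locks rather than
// deleting them outright, and macro expansion later skips the tagged ones
// (hypothetical client code):
//
//   if (alock->is_eliminated()) { /* expand without the runtime lock */ }
//   else if (can_eliminate)     { alock->set_non_esc_obj(); }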

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call.  This node takes 3 "parameters":
//    0  -  object to lock
//    1  -  a BoxLockNode
//    2  -  a FastLockNode
//
class LockNode : public AbstractLockNode {
public:

  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;         // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);

    return TypeFunc::make(domain, range);
  }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool        guaranteed_safepoint()  { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  bool is_nested_lock_region(); // Is this Lock nested?
  bool is_nested_lock_region(Compile * c); // Why isn't this Lock nested?
};

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
private:
#ifdef ASSERT
  JVMState* const _dbg_jvms;      // Pointer to list of JVM State objects
#endif
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf )
#ifdef ASSERT
    , _dbg_jvms(NULL)
#endif
  {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool        guaranteed_safepoint()  { return false; }
#ifdef ASSERT
  void set_dbg_jvms(JVMState* s) {
    *(JVMState**)&_dbg_jvms = s;  // override const attribute in the accessor
  }
  JVMState* dbg_jvms() const { return _dbg_jvms; }
#else
  JVMState* dbg_jvms() const { return NULL; }
#endif
};
#endif // SHARE_OPTO_CALLNODE_HPP