/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_NODE_HPP
#define SHARE_VM_OPTO_NODE_HPP

#include "libadt/port.hpp"
#include "libadt/vectset.hpp"
#include "opto/compile.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style


class AbstractLockNode;
class AddNode;
class AddPNode;
class AliasInfo;
class AllocateArrayNode;
class AllocateNode;
class Block;
class BoolNode;
class BoxLockNode;
class CMoveNode;
class CallDynamicJavaNode;
class CallJavaNode;
class CallLeafNode;
class CallNode;
class CallRuntimeNode;
class CallStaticJavaNode;
class CatchNode;
class CatchProjNode;
class CheckCastPPNode;
class CastIINode;
class ClearArrayNode;
class CmpNode;
class CodeBuffer;
class ConstraintCastNode;
class ConNode;
class CountedLoopNode;
class CountedLoopEndNode;
class DecodeNarrowPtrNode;
class DecodeNNode;
class DecodeNKlassNode;
class EncodeNarrowPtrNode;
class EncodePNode;
class EncodePKlassNode;
class FastLockNode;
class FastUnlockNode;
class IfNode;
class IfFalseNode;
class IfTrueNode;
class InitializeNode;
class JVMState;
class JumpNode;
class JumpProjNode;
class LoadNode;
class LoadStoreNode;
class LockNode;
class LoopNode;
class MachBranchNode;
class MachCallDynamicJavaNode;
class MachCallJavaNode;
class MachCallLeafNode;
class MachCallNode;
class MachCallRuntimeNode;
class MachCallStaticJavaNode;
class MachConstantBaseNode;
class MachConstantNode;
class MachGotoNode;
class MachIfNode;
class MachNode;
class MachNullCheckNode;
class MachProjNode;
class MachReturnNode;
class MachSafePointNode;
class MachSpillCopyNode;
class MachTempNode;
class MachMergeNode;
class Matcher;
class MemBarNode;
class MemBarStoreStoreNode;
class MemNode;
class MergeMemNode;
class MulNode;
class MultiNode;
class MultiBranchNode;
class NeverBranchNode;
class Node;
class Node_Array;
class Node_List;
class Node_Stack;
class NullCheckNode;
class OopMap;
class ParmNode;
class PCTableNode;
class PhaseCCP;
class PhaseGVN;
class PhaseIterGVN;
class PhaseRegAlloc;
class PhaseTransform;
class PhaseValues;
class PhiNode;
class Pipeline;
class ProjNode;
class RegMask;
class RegionNode;
class RootNode;
class SafePointNode;
class SafePointScalarObjectNode;
class StartNode;
class State;
class StoreNode;
class SubNode;
class Type;
class TypeNode;
class UnlockNode;
class VectorNode;
class LoadVectorNode;
class StoreVectorNode;
class VectorSet;
typedef void (*NFunc)(Node&,void*);
extern "C" {
  typedef int (*C_sort_func_t)(const void *, const void *);
}

// The type of all node counts and indexes.
// It must hold at least 16 bits, but must also be fast to load and store.
// This type, if less than 32 bits, could limit the number of possible nodes.
// (To make this type platform-specific, move to globalDefinitions_xxx.hpp.)
typedef unsigned int node_idx_t;


#ifndef OPTO_DU_ITERATOR_ASSERT
#ifdef ASSERT
#define OPTO_DU_ITERATOR_ASSERT 1
#else
#define OPTO_DU_ITERATOR_ASSERT 0
#endif
#endif //OPTO_DU_ITERATOR_ASSERT

#if OPTO_DU_ITERATOR_ASSERT
class DUIterator;
class DUIterator_Fast;
class DUIterator_Last;
#else
typedef uint   DUIterator;
typedef Node** DUIterator_Fast;
typedef Node** DUIterator_Last;
#endif

// Node Sentinel
#define NodeSentinel (Node*)-1

// Unknown count frequency
#define COUNT_UNKNOWN (-1.0f)

//------------------------------Node------------------------------------------
// Nodes define actions in the program. They create values, which have types.
// They are both vertices in a directed graph and program primitives. Nodes
// are labeled; the label is the "opcode", the primitive function in the lambda
// calculus sense that gives meaning to the Node. Node inputs are ordered (so
// that "a-b" is different from "b-a"). The inputs to a Node are the inputs to
// the Node's function. These inputs also define a Type equation for the Node.
// Solving these Type equations amounts to doing dataflow analysis.
// Control and data are uniformly represented in the graph. Finally, Nodes
// have a unique dense integer index which is used to index into side arrays
// whenever I have phase-specific information.

class Node {
  friend class VMStructs;

  // Lots of restrictions on cloning Nodes
  Node(const Node&);            // not defined; linker error to use these
  Node &operator=(const Node &rhs);

public:
  friend class Compile;
#if OPTO_DU_ITERATOR_ASSERT
  friend class DUIterator_Common;
  friend class DUIterator;
  friend class DUIterator_Fast;
  friend class DUIterator_Last;
#endif

  // Because Nodes come and go, I define an Arena of Node structures to pull
  // from. This should allow fast access to node creation & deletion. This
  // field is a local cache of a value defined in some "program fragment" of
  // which these Nodes are just a part.

  // New operator that takes a Compile pointer; this will eventually
  // be the "new" New operator.
  inline void* operator new( size_t x, Compile* C) throw() {
    Node* n = (Node*)C->node_arena()->Amalloc_D(x);
#ifdef ASSERT
    n->_in = (Node**)n; // magic cookie for assertion check
#endif
    n->_out = (Node**)C;
    return (void*)n;
  }

  // Delete is a NOP
  void operator delete( void *ptr ) {}
  // Fancy destructor; eagerly attempt to reclaim Node numberings and storage
  void destruct();
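
  // Construction sketch: FooNode and its operands are illustrative names only.
  // Allocation goes through the arena-based operator new above, so creation is
  // cheap and storage is reclaimed in bulk when the node arena is discarded:
  //   Node* n = new (C) FooNode(control, left, right);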

  // Create a new Node. Required is the number of inputs required for
  // semantic correctness.
  Node( uint required );

  // Create a new Node with given input edges.
  // This version requires use of the "edge-count" new.
  // E.g.  new (C,3) FooNode( C, NULL, left, right );
  Node( Node *n0 );
  Node( Node *n0, Node *n1 );
  Node( Node *n0, Node *n1, Node *n2 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4, Node *n5 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3,
        Node *n4, Node *n5, Node *n6 );

  // Clone an inherited Node given only the base Node type.
  Node* clone() const;

  // Clone a Node, immediately supplying one or two new edges.
  // The first and second arguments, if non-null, replace in(1) and in(2),
  // respectively.
  Node* clone_with_data_edge(Node* in1, Node* in2 = NULL) const {
    Node* nn = clone();
    if (in1 != NULL)  nn->set_req(1, in1);
    if (in2 != NULL)  nn->set_req(2, in2);
    return nn;
  }

private:
  // Shared setup for the above constructors.
  // Handles all interactions with Compile::current.
  // Puts initial values in all Node fields except _idx.
  // Returns the initial value for _idx, which cannot
  // be initialized by assignment.
  inline int Init(int req, Compile* C);

//----------------- input edge handling
protected:
  friend class PhaseCFG;        // Access to address of _in array elements
  Node **_in;                   // Array of use-def references to Nodes
  Node **_out;                  // Array of def-use references to Nodes

  // Input edges are split into two categories. Required edges are required
  // for semantic correctness; order is important and NULLs are allowed.
  // Precedence edges are used to help determine execution order and are
  // added, e.g., for scheduling purposes. They are unordered and not
  // duplicated; they have no embedded NULLs. Edges from 0 to _cnt-1
  // are required, from _cnt to _max-1 are precedence edges.
  node_idx_t _cnt;              // Total number of required Node inputs.

  node_idx_t _max;              // Actual length of input array.

  // Output edges are an unordered list of def-use edges which exactly
  // correspond to required input edges which point from other nodes
  // to this one. Thus the count of the output edges is the number of
  // users of this node.
  node_idx_t _outcnt;           // Total number of Node outputs.

  node_idx_t _outmax;           // Actual length of output array.

  // Grow the actual input array to the next larger power-of-2 bigger than len.
  void grow( uint len );
  // Grow the output array to the next larger power-of-2 bigger than len.
  void out_grow( uint len );

public:
  // Each Node is assigned a unique small/dense number. This number is used
  // to index into auxiliary arrays of data and bit vectors.
  // The field _idx is declared constant to defend against inadvertent assignments,
  // since it is used by clients as a naked field. However, the field's value can be
  // changed using the set_idx() method.
  //
  // The PhaseRenumberLive phase renumbers nodes based on liveness information.
  // Therefore, it updates the value of the _idx field. The parse-time _idx is
  // preserved in _parse_idx.
  const node_idx_t _idx;
  DEBUG_ONLY(const node_idx_t _parse_idx;)

  // Get the (read-only) number of input edges
  uint req() const { return _cnt; }
  uint len() const { return _max; }
  // Get the (read-only) number of output edges
  uint outcnt() const { return _outcnt; }

#if OPTO_DU_ITERATOR_ASSERT
  // Iterate over the out-edges of this node. Deletions are illegal.
  inline DUIterator outs() const;
  // Use this when the out array might have changed to suppress asserts.
  inline DUIterator& refresh_out_pos(DUIterator& i) const;
  // Does the node have an out at this position? (Used for iteration.)
  inline bool has_out(DUIterator& i) const;
  inline Node*    out(DUIterator& i) const;
  // Iterate over the out-edges of this node. All changes are illegal.
  inline DUIterator_Fast fast_outs(DUIterator_Fast& max) const;
  inline Node*    fast_out(DUIterator_Fast& i) const;
  // Iterate over the out-edges of this node, deleting one at a time.
  inline DUIterator_Last last_outs(DUIterator_Last& min) const;
  inline Node*    last_out(DUIterator_Last& i) const;
  // The inline bodies of all these methods are after the iterator definitions.
#else
  // Iterate over the out-edges of this node. Deletions are illegal.
  // This iteration uses integral indexes, to decouple from array reallocations.
  DUIterator outs() const { return 0; }
  // Use this when the out array might have changed to suppress asserts.
  DUIterator refresh_out_pos(DUIterator i) const { return i; }

  // Reference to the i'th output Node. Error if out of bounds.
  Node* out(DUIterator i) const { assert(i < _outcnt, "oob"); return _out[i]; }
  // Does the node have an out at this position? (Used for iteration.)
  bool has_out(DUIterator i) const { return i < _outcnt; }

  // Iterate over the out-edges of this node. All changes are illegal.
  // This iteration uses a pointer internal to the out array.
  DUIterator_Fast fast_outs(DUIterator_Fast& max) const {
    Node** out = _out;
    // Assign a limit pointer to the reference argument:
    max = out + (ptrdiff_t)_outcnt;
    // Return the base pointer:
    return out;
  }
  Node* fast_out(DUIterator_Fast i) const { return *i; }
  // Iterate over the out-edges of this node, deleting one at a time.
  // This iteration uses a pointer internal to the out array.
  DUIterator_Last last_outs(DUIterator_Last& min) const {
    Node** out = _out;
    // Assign a limit pointer to the reference argument:
    min = out;
    // Return the pointer to the start of the iteration:
    return out + (ptrdiff_t)_outcnt - 1;
  }
  Node* last_out(DUIterator_Last i) const { return *i; }
#endif

  // Reference to the i'th input Node. Error if out of bounds.
  Node* in(uint i) const { assert(i < _max, err_msg_res("oob: i=%d, _max=%d", i, _max)); return _in[i]; }
  // Reference to the i'th input Node. NULL if out of bounds.
  Node* lookup(uint i) const { return ((i < _max) ? _in[i] : NULL); }
  // Reference to the i'th output Node. Error if out of bounds.
  // Use this accessor sparingly. We are trying to use iterators instead.
  Node* raw_out(uint i) const { assert(i < _outcnt, "oob"); return _out[i]; }
  // Return the unique out edge.
  Node* unique_out() const { assert(_outcnt == 1, "not unique"); return _out[0]; }
  // Delete out edge at position 'i' by moving last out edge to position 'i'
  void raw_del_out(uint i) {
    assert(i < _outcnt, "oob");
    assert(_outcnt > 0, "oob");
#if OPTO_DU_ITERATOR_ASSERT
    // Record that a change happened here.
    debug_only(_last_del = _out[i]; ++_del_tick);
#endif
    _out[i] = _out[--_outcnt];
    // Smash the old edge so it can't be used accidentally.
    debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
  }

#ifdef ASSERT
  bool is_dead() const;
#define is_not_dead(n) ((n) == NULL || !VerifyIterativeGVN || !((n)->is_dead()))
#endif
  // Check whether node has become unreachable
  bool is_unreachable(PhaseIterGVN &igvn) const;

  // Set a required input edge, also updates corresponding output edge
  void add_req( Node *n ); // Append a NEW required input
  void add_req( Node *n0, Node *n1 ) {
    add_req(n0); add_req(n1); }
  void add_req( Node *n0, Node *n1, Node *n2 ) {
    add_req(n0); add_req(n1); add_req(n2); }
  void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n).
  void del_req( uint idx ); // Delete required edge & compact
  void del_req_ordered( uint idx ); // Delete required edge & compact with preserved order
  void ins_req( uint i, Node *n ); // Insert a NEW required input
  void set_req( uint i, Node *n ) {
    assert( is_not_dead(n), "can not use dead node");
    assert( i < _cnt, err_msg_res("oob: i=%d, _cnt=%d", i, _cnt));
    assert( !VerifyHashTableKeys || _hash_lock == 0,
            "remove node from hash table before modifying it");
    Node** p = &_in[i];    // cache this._in, across the del_out call
    if (*p != NULL)  (*p)->del_out((Node *)this);
    (*p) = n;
    if (n != NULL)  n->add_out((Node *)this);
  }
  // Light version of set_req() to init inputs after node creation.
  void init_req( uint i, Node *n ) {
    assert( i == 0 && this == n ||
            is_not_dead(n), "can not use dead node");
    assert( i < _cnt, "oob");
    assert( !VerifyHashTableKeys || _hash_lock == 0,
            "remove node from hash table before modifying it");
    assert( _in[i] == NULL, "sanity");
    _in[i] = n;
    if (n != NULL)  n->add_out((Node *)this);
  }
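
  // Edge-editing sketch ('use', 'a' and 'b' are illustrative names): these
  // calls keep both directions of the Def-Use info in sync automatically:
  //   use->set_req(1, b);                // unhook old in(1), hook up b
  //   int nb = use->replace_edge(a, b);  // rewire every a-input to b
  //   use->del_req(2);                   // drop in(2) and compact the inputs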
  // Find first occurrence of n among my edges:
  int find_edge(Node* n);
  int find_prec_edge(Node* n) {
    for (uint i = req(); i < len(); i++) {
      if (_in[i] == n) return i;
      if (_in[i] == NULL) {
        DEBUG_ONLY( while ((++i) < len()) assert(_in[i] == NULL, "Gap in prec edges!"); )
        break;
      }
    }
    return -1;
  }
  int replace_edge(Node* old, Node* neww);
  int replace_edges_in_range(Node* old, Node* neww, int start, int end);
  // NULL out all inputs to eliminate incoming Def-Use edges.
  // Return the number of edges between 'n' and 'this'
  int  disconnect_inputs(Node *n, Compile *c);

  // Quickly, return true if and only if I am Compile::current()->top().
  bool is_top() const {
    assert((this == (Node*) Compile::current()->top()) == (_out == NULL), "");
    return (_out == NULL);
  }
  // Reaffirm invariants for is_top. (Only from Compile::set_cached_top_node.)
  void setup_is_top();

  // Strip away casting. (It is depth-limited.)
  Node* uncast() const;
  // Return whether two Nodes are equivalent, after stripping casting.
  bool eqv_uncast(const Node* n) const {
    return (this->uncast() == n->uncast());
  }

  // Find the first out-edge of this node whose opcode matches 'opcode'.
  Node* find_out_with(int opcode);

private:
  static Node* uncast_helper(const Node* n);

  // Add an output edge to the end of the list
  void add_out( Node *n ) {
    if (is_top())  return;
    if( _outcnt == _outmax ) out_grow(_outcnt);
    _out[_outcnt++] = n;
  }
  // Delete an output edge
  void del_out( Node *n ) {
    if (is_top())  return;
    Node** outp = &_out[_outcnt];
    // Find and remove n
    do {
      assert(outp > _out, "Missing Def-Use edge");
    } while (*--outp != n);
    *outp = _out[--_outcnt];
    // Smash the old edge so it can't be used accidentally.
    debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
    // Record that a change happened here.
#if OPTO_DU_ITERATOR_ASSERT
    debug_only(_last_del = n; ++_del_tick);
#endif
  }
  // Close gap after removing edge.
  void close_prec_gap_at(uint gap) {
    assert(_cnt <= gap && gap < _max, "no valid prec edge");
    uint i = gap;
    Node *last = NULL;
    for (; i < _max-1; ++i) {
      Node *next = _in[i+1];
      if (next == NULL) break;
      last = next;
    }
    _in[gap] = last;  // Move last slot to empty one.
    _in[i] = NULL;    // NULL out last slot.
  }

public:
  // Globally replace this node by a given new node, updating all uses.
  void replace_by(Node* new_node);
  // Globally replace this node by a given new node, updating all uses
  // and cutting input edges of old node.
  void subsume_by(Node* new_node, Compile* c) {
    replace_by(new_node);
    disconnect_inputs(NULL, c);
  }
  void set_req_X( uint i, Node *n, PhaseIterGVN *igvn );
  // Find the one non-null required input. RegionNode only
  Node *nonnull_req() const;
  // Add or remove precedence edges
  void add_prec( Node *n );
  void rm_prec( uint i );

  // Note: prec(i) will not necessarily point to n if edge already exists.
  void set_prec( uint i, Node *n ) {
    assert(i < _max, err_msg("oob: i=%d, _max=%d", i, _max));
    assert(is_not_dead(n), "can not use dead node");
    assert(i >= _cnt, "not a precedence edge");
    // Avoid spec violation: duplicated prec edge.
    if (_in[i] == n) return;
    if (n == NULL || find_prec_edge(n) != -1) {
      rm_prec(i);
      return;
    }
    if (_in[i] != NULL) _in[i]->del_out((Node *)this);
    _in[i] = n;
    if (n != NULL) n->add_out((Node *)this);
  }

  // Set this node's index, used by cisc_version to replace current node
  void set_idx(uint new_idx) {
    const node_idx_t* ref = &_idx;
    *(node_idx_t*)ref = new_idx;
  }
  // Swap input edge order. (Edge indexes i1 and i2 are usually 1 and 2.)
  void swap_edges(uint i1, uint i2) {
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    // Def-Use info is unchanged
    Node* n1 = in(i1);
    Node* n2 = in(i2);
    _in[i1] = n2;
    _in[i2] = n1;
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "edge swap must preserve hash code");
  }

  // Iterators over input Nodes for a Node X are written as:
  // for( i = 0; i < X.req(); i++ ) ... X[i] ...
  // NOTE: Required edges can contain embedded NULL pointers.

//----------------- Other Node Properties

  // Generate class id for some ideal nodes to avoid virtual query
  // methods is_<Node>().
  // Class id is the set of bits corresponding to the node class and all its
  // super classes so that queries for super classes are also valid.
  // Subclasses of the same super class have different assigned bits
  // (the third parameter in the macro DEFINE_CLASS_ID).
  // Classes with deeper hierarchy are declared first.
  // Classes with the same hierarchy depth are sorted by usage frequency.
  //
  // The query method masks the bits to cut off bits of subclasses
  // and then compares the result with the class id
  // (see the macro DEFINE_CLASS_QUERY below).
  //
  //  Class_MachCall=30, ClassMask_MachCall=31
  // 12               8               4               0
  //  0   0   0   0   0   0   0   0   1   1   1   1   0
  //                                  |   |   |   |
  //                                  |   |   |   Bit_Mach=2
  //                                  |   |   Bit_MachReturn=4
  //                                  |   Bit_MachSafePoint=8
  //                                  Bit_MachCall=16
  //
  //  Class_CountedLoop=56, ClassMask_CountedLoop=63
  // 12               8               4               0
  //  0   0   0   0   0   0   0   1   1   1   0   0   0
  //                              |   |   |
  //                              |   |   Bit_Region=8
  //                              |   Bit_Loop=16
  //                              Bit_CountedLoop=32

  #define DEFINE_CLASS_ID(cl, supcl, subn) \
  Bit_##cl = (Class_##supcl == 0) ? 1 << subn : (Bit_##supcl) << (1 + subn) , \
  Class_##cl = Class_##supcl + Bit_##cl , \
  ClassMask_##cl = ((Bit_##cl << 1) - 1) ,

  // This enum is used only for C2 ideal and mach nodes with is_<node>() methods
  // so that its values fit into 16 bits.
  enum NodeClasses {
    Bit_Node   = 0x0000,
    Class_Node = 0x0000,
    ClassMask_Node = 0xFFFF,

    DEFINE_CLASS_ID(Multi, Node, 0)
      DEFINE_CLASS_ID(SafePoint, Multi, 0)
        DEFINE_CLASS_ID(Call,      SafePoint, 0)
          DEFINE_CLASS_ID(CallJava,         Call, 0)
            DEFINE_CLASS_ID(CallStaticJava,   CallJava, 0)
            DEFINE_CLASS_ID(CallDynamicJava,  CallJava, 1)
          DEFINE_CLASS_ID(CallRuntime,      Call, 1)
            DEFINE_CLASS_ID(CallLeaf,         CallRuntime, 0)
          DEFINE_CLASS_ID(Allocate,         Call, 2)
            DEFINE_CLASS_ID(AllocateArray,    Allocate, 0)
          DEFINE_CLASS_ID(AbstractLock,     Call, 3)
            DEFINE_CLASS_ID(Lock,             AbstractLock, 0)
            DEFINE_CLASS_ID(Unlock,           AbstractLock, 1)
      DEFINE_CLASS_ID(MultiBranch, Multi, 1)
        DEFINE_CLASS_ID(PCTable,     MultiBranch, 0)
          DEFINE_CLASS_ID(Catch,       PCTable, 0)
          DEFINE_CLASS_ID(Jump,        PCTable, 1)
        DEFINE_CLASS_ID(If,          MultiBranch, 1)
          DEFINE_CLASS_ID(CountedLoopEnd, If, 0)
        DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2)
      DEFINE_CLASS_ID(Start,       Multi, 2)
      DEFINE_CLASS_ID(MemBar,      Multi, 3)
        DEFINE_CLASS_ID(Initialize,       MemBar, 0)
        DEFINE_CLASS_ID(MemBarStoreStore, MemBar, 1)

    DEFINE_CLASS_ID(Mach,  Node, 1)
      DEFINE_CLASS_ID(MachReturn, Mach, 0)
        DEFINE_CLASS_ID(MachSafePoint, MachReturn, 0)
          DEFINE_CLASS_ID(MachCall, MachSafePoint, 0)
            DEFINE_CLASS_ID(MachCallJava,         MachCall, 0)
              DEFINE_CLASS_ID(MachCallStaticJava,   MachCallJava, 0)
              DEFINE_CLASS_ID(MachCallDynamicJava,  MachCallJava, 1)
            DEFINE_CLASS_ID(MachCallRuntime,      MachCall, 1)
              DEFINE_CLASS_ID(MachCallLeaf,         MachCallRuntime, 0)
      DEFINE_CLASS_ID(MachBranch, Mach, 1)
        DEFINE_CLASS_ID(MachIf,        MachBranch, 0)
        DEFINE_CLASS_ID(MachGoto,      MachBranch, 1)
        DEFINE_CLASS_ID(MachNullCheck, MachBranch, 2)
      DEFINE_CLASS_ID(MachSpillCopy,    Mach, 2)
      DEFINE_CLASS_ID(MachTemp,         Mach, 3)
      DEFINE_CLASS_ID(MachConstantBase, Mach, 4)
      DEFINE_CLASS_ID(MachConstant,     Mach, 5)
      DEFINE_CLASS_ID(MachMerge,        Mach, 6)

    DEFINE_CLASS_ID(Type,  Node, 2)
      DEFINE_CLASS_ID(Phi,   Type, 0)
      DEFINE_CLASS_ID(ConstraintCast, Type, 1)
        DEFINE_CLASS_ID(CastII, ConstraintCast, 0)
      DEFINE_CLASS_ID(CheckCastPP, Type, 2)
      DEFINE_CLASS_ID(CMove, Type, 3)
      DEFINE_CLASS_ID(SafePointScalarObject, Type, 4)
      DEFINE_CLASS_ID(DecodeNarrowPtr, Type, 5)
        DEFINE_CLASS_ID(DecodeN, DecodeNarrowPtr, 0)
        DEFINE_CLASS_ID(DecodeNKlass, DecodeNarrowPtr, 1)
      DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6)
        DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0)
        DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1)

    DEFINE_CLASS_ID(Proj,  Node, 3)
      DEFINE_CLASS_ID(CatchProj, Proj, 0)
      DEFINE_CLASS_ID(JumpProj,  Proj, 1)
      DEFINE_CLASS_ID(IfTrue,    Proj, 2)
      DEFINE_CLASS_ID(IfFalse,   Proj, 3)
      DEFINE_CLASS_ID(Parm,      Proj, 4)
      DEFINE_CLASS_ID(MachProj,  Proj, 5)

    DEFINE_CLASS_ID(Mem,   Node, 4)
      DEFINE_CLASS_ID(Load,  Mem, 0)
        DEFINE_CLASS_ID(LoadVector,  Load, 0)
      DEFINE_CLASS_ID(Store, Mem, 1)
        DEFINE_CLASS_ID(StoreVector, Store, 0)
      DEFINE_CLASS_ID(LoadStore, Mem, 2)

    DEFINE_CLASS_ID(Region, Node, 5)
      DEFINE_CLASS_ID(Loop, Region, 0)
        DEFINE_CLASS_ID(Root,        Loop, 0)
        DEFINE_CLASS_ID(CountedLoop, Loop, 1)

    DEFINE_CLASS_ID(Sub,   Node, 6)
      DEFINE_CLASS_ID(Cmp,   Sub, 0)
        DEFINE_CLASS_ID(FastLock,   Cmp, 0)
        DEFINE_CLASS_ID(FastUnlock, Cmp, 1)

    DEFINE_CLASS_ID(MergeMem,   Node, 7)
    DEFINE_CLASS_ID(Bool,       Node, 8)
    DEFINE_CLASS_ID(AddP,       Node, 9)
    DEFINE_CLASS_ID(BoxLock,    Node, 10)
    DEFINE_CLASS_ID(Add,        Node, 11)
    DEFINE_CLASS_ID(Mul,        Node, 12)
    DEFINE_CLASS_ID(Vector,     Node, 13)
    DEFINE_CLASS_ID(ClearArray, Node, 14)

    _max_classes  = ClassMask_ClearArray
  };
  #undef DEFINE_CLASS_ID

  // Flags are sorted by usage frequency.
  enum NodeFlags {
    Flag_is_Copy                     = 0x01, // should be first bit to avoid shift
    Flag_rematerialize               = Flag_is_Copy << 1,
    Flag_needs_anti_dependence_check = Flag_rematerialize << 1,
    Flag_is_macro                    = Flag_needs_anti_dependence_check << 1,
    Flag_is_Con                      = Flag_is_macro << 1,
    Flag_is_cisc_alternate           = Flag_is_Con << 1,
    Flag_is_dead_loop_safe           = Flag_is_cisc_alternate << 1,
    Flag_may_be_short_branch         = Flag_is_dead_loop_safe << 1,
    Flag_avoid_back_to_back_before   = Flag_may_be_short_branch << 1,
    Flag_avoid_back_to_back_after    = Flag_avoid_back_to_back_before << 1,
    Flag_has_call                    = Flag_avoid_back_to_back_after << 1,
    Flag_is_expensive                = Flag_has_call << 1,
    _max_flags = (Flag_is_expensive << 1) - 1 // allow flags combination
  };

private:
  jushort _class_id;
  jushort _flags;

protected:
  // These methods should be called from constructors only.
  void init_class_id(jushort c) {
    assert(c <= _max_classes, "invalid node class");
    _class_id = c; // cast out const
  }
  void init_flags(jushort fl) {
    assert(fl <= _max_flags, "invalid node flag");
    _flags |= fl;
  }
  void clear_flag(jushort fl) {
    assert(fl <= _max_flags, "invalid node flag");
    _flags &= ~fl;
  }

public:
  const jushort class_id() const { return _class_id; }

  const jushort flags() const { return _flags; }

  // Return a dense integer opcode number
  virtual int Opcode() const;

  // Virtual inherited Node size
  virtual uint size_of() const;

  // Other interesting Node properties
  #define DEFINE_CLASS_QUERY(type)                           \
  bool is_##type() const {                                   \
    return ((_class_id & ClassMask_##type) == Class_##type); \
  }                                                          \
  type##Node *as_##type() const {                            \
    assert(is_##type(), "invalid node class");               \
    return (type##Node*)this;                                \
  }                                                          \
  type##Node* isa_##type() const {                           \
    return (is_##type()) ? as_##type() : NULL;               \
  }

  DEFINE_CLASS_QUERY(AbstractLock)
  DEFINE_CLASS_QUERY(Add)
  DEFINE_CLASS_QUERY(AddP)
  DEFINE_CLASS_QUERY(Allocate)
  DEFINE_CLASS_QUERY(AllocateArray)
  DEFINE_CLASS_QUERY(Bool)
  DEFINE_CLASS_QUERY(BoxLock)
  DEFINE_CLASS_QUERY(Call)
  DEFINE_CLASS_QUERY(CallDynamicJava)
  DEFINE_CLASS_QUERY(CallJava)
  DEFINE_CLASS_QUERY(CallLeaf)
  DEFINE_CLASS_QUERY(CallRuntime)
  DEFINE_CLASS_QUERY(CallStaticJava)
  DEFINE_CLASS_QUERY(Catch)
  DEFINE_CLASS_QUERY(CatchProj)
  DEFINE_CLASS_QUERY(CheckCastPP)
  DEFINE_CLASS_QUERY(CastII)
  DEFINE_CLASS_QUERY(ConstraintCast)
  DEFINE_CLASS_QUERY(ClearArray)
  DEFINE_CLASS_QUERY(CMove)
  DEFINE_CLASS_QUERY(Cmp)
  DEFINE_CLASS_QUERY(CountedLoop)
  DEFINE_CLASS_QUERY(CountedLoopEnd)
  DEFINE_CLASS_QUERY(DecodeNarrowPtr)
  DEFINE_CLASS_QUERY(DecodeN)
  DEFINE_CLASS_QUERY(DecodeNKlass)
  DEFINE_CLASS_QUERY(EncodeNarrowPtr)
  DEFINE_CLASS_QUERY(EncodeP)
  DEFINE_CLASS_QUERY(EncodePKlass)
  DEFINE_CLASS_QUERY(FastLock)
  DEFINE_CLASS_QUERY(FastUnlock)
  DEFINE_CLASS_QUERY(If)
  DEFINE_CLASS_QUERY(IfFalse)
  DEFINE_CLASS_QUERY(IfTrue)
  DEFINE_CLASS_QUERY(Initialize)
  DEFINE_CLASS_QUERY(Jump)
  DEFINE_CLASS_QUERY(JumpProj)
  DEFINE_CLASS_QUERY(Load)
  DEFINE_CLASS_QUERY(LoadStore)
  DEFINE_CLASS_QUERY(Lock)
  DEFINE_CLASS_QUERY(Loop)
  DEFINE_CLASS_QUERY(Mach)
  DEFINE_CLASS_QUERY(MachBranch)
  DEFINE_CLASS_QUERY(MachCall)
  DEFINE_CLASS_QUERY(MachCallDynamicJava)
  DEFINE_CLASS_QUERY(MachCallJava)
  DEFINE_CLASS_QUERY(MachCallLeaf)
  DEFINE_CLASS_QUERY(MachCallRuntime)
  DEFINE_CLASS_QUERY(MachCallStaticJava)
  DEFINE_CLASS_QUERY(MachConstantBase)
  DEFINE_CLASS_QUERY(MachConstant)
  DEFINE_CLASS_QUERY(MachGoto)
  DEFINE_CLASS_QUERY(MachIf)
  DEFINE_CLASS_QUERY(MachNullCheck)
  DEFINE_CLASS_QUERY(MachProj)
  DEFINE_CLASS_QUERY(MachReturn)
  DEFINE_CLASS_QUERY(MachSafePoint)
  DEFINE_CLASS_QUERY(MachSpillCopy)
  DEFINE_CLASS_QUERY(MachTemp)
  DEFINE_CLASS_QUERY(MachMerge)
  DEFINE_CLASS_QUERY(Mem)
  DEFINE_CLASS_QUERY(MemBar)
  DEFINE_CLASS_QUERY(MemBarStoreStore)
  DEFINE_CLASS_QUERY(MergeMem)
  DEFINE_CLASS_QUERY(Mul)
  DEFINE_CLASS_QUERY(Multi)
  DEFINE_CLASS_QUERY(MultiBranch)
  DEFINE_CLASS_QUERY(Parm)
  DEFINE_CLASS_QUERY(PCTable)
  DEFINE_CLASS_QUERY(Phi)
  DEFINE_CLASS_QUERY(Proj)
  DEFINE_CLASS_QUERY(Region)
  DEFINE_CLASS_QUERY(Root)
  DEFINE_CLASS_QUERY(SafePoint)
  DEFINE_CLASS_QUERY(SafePointScalarObject)
  DEFINE_CLASS_QUERY(Start)
  DEFINE_CLASS_QUERY(Store)
  DEFINE_CLASS_QUERY(Sub)
  DEFINE_CLASS_QUERY(Type)
  DEFINE_CLASS_QUERY(Vector)
  DEFINE_CLASS_QUERY(LoadVector)
  DEFINE_CLASS_QUERY(StoreVector)
  DEFINE_CLASS_QUERY(Unlock)

  #undef DEFINE_CLASS_QUERY

  // duplicate of is_MachSpillCopy()
  bool is_SpillCopy () const {
    return ((_class_id & ClassMask_MachSpillCopy) == Class_MachSpillCopy);
  }

  bool is_Con () const { return (_flags & Flag_is_Con) != 0; }
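
  // Query usage sketch (the accessors are generated by DEFINE_CLASS_QUERY):
  //   if (n->is_Call()) { CallNode* call = n->as_Call(); ... }  // asserted cast
  //   CallNode* call = n->isa_Call();   // NULL when n is not a CallNode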
  // A data node which is safe to leave in a dead loop during IGVN optimization.
  bool is_dead_loop_safe() const {
    return is_Phi() || (is_Proj() && in(0) == NULL) ||
           ((_flags & (Flag_is_dead_loop_safe | Flag_is_Con)) != 0 &&
            (!is_Proj() || !in(0)->is_Allocate()));
  }

  // is_Copy() returns copied edge index (0 or 1)
  uint is_Copy() const { return (_flags & Flag_is_Copy); }

  virtual bool is_CFG() const { return false; }

  // If this node is control-dependent on a test, can it be
  // rerouted to a dominating equivalent test? This is usually
  // true of non-CFG nodes, but can be false for operations which
  // depend for their correct sequencing on more than one test.
  // (In that case, hoisting to a dominating test may silently
  // skip some other important test.)
  virtual bool depends_only_on_test() const { assert(!is_CFG(), ""); return true; };

  // When building basic blocks, I need to have a notion of block beginning
  // Nodes, next block selector Nodes (block enders), and next block
  // projections. These calls need to work on their machine equivalents. The
  // Ideal beginning Nodes are RootNode, RegionNode and StartNode.
  bool is_block_start() const {
    if ( is_Region() )
      return this == (const Node*)in(0);
    else
      return is_Start();
  }

  // The Ideal control projection Nodes are IfTrue/IfFalse, JumpProjNode, Root,
  // Goto and Return. This call also returns the block ending Node.
  virtual const Node *is_block_proj() const;

  // The node is a "macro" node which needs to be expanded before matching
  bool is_macro() const { return (_flags & Flag_is_macro) != 0; }
  // The node is expensive: the best control is set during loop opts
  bool is_expensive() const { return (_flags & Flag_is_expensive) != 0 && in(0) != NULL; }

//----------------- Optimization

  // Get the worst-case Type output for this Node.
  virtual const class Type *bottom_type() const;

  // If we find a better type for a node, try to record it permanently.
  // Return true if this node actually changed.
  // Be sure to do the hash_delete game in the "rehash" variant.
  void raise_bottom_type(const Type* new_type);

  // Get the address type with which this node uses and/or defs memory,
  // or NULL if none. The address type is conservatively wide.
  // Returns non-null for calls, membars, loads, stores, etc.
  // Returns TypePtr::BOTTOM if the node touches memory "broadly".
  virtual const class TypePtr *adr_type() const { return NULL; }

  // Return an existing node which computes the same function as this node.
  // The optimistic combined algorithm requires this to return a Node which
  // is a small number of steps away (e.g., one of my inputs).
  virtual Node *Identity( PhaseTransform *phase );

  // Return the set of values this Node can take on at runtime.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Return a node which is more "ideal" than the current node.
  // The invariants on this call are subtle. If in doubt, read the
  // treatise in node.cpp above the default implementation AND TEST WITH
  // +VerifyIterativeGVN!
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Some nodes have specific Ideal subgraph transformations only if they are
  // unique users of specific nodes. Such nodes should be put on the IGVN
  // worklist for the transformations to happen.
  bool has_special_unique_user() const;

  // Skip Proj and CatchProj node chains. Check for Null and Top.
  Node* find_exact_control(Node* ctrl);

  // Check if 'this' node dominates or is equal to 'sub'.
  bool dominates(Node* sub, Node_List &nlist);

protected:
  bool remove_dead_region(PhaseGVN *phase, bool can_reshape);
public:

  // Idealize graph, using DU info. Done after constant propagation
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );

  // See if there is valid pipeline info
  static  const Pipeline *pipeline_class();
  virtual const Pipeline *pipeline() const;

  // Compute the latency from the def to this instruction of the ith input node
  uint latency(uint i);

  // Hash & compare functions, for pessimistic value numbering

  // If the hash function returns the special sentinel value NO_HASH,
  // the node is guaranteed never to compare equal to any other node.
  // If we accidentally generate a hash with value NO_HASH the node
  // won't go into the table and we'll lose a little optimization.
  enum { NO_HASH = 0 };
  virtual uint hash() const;
  virtual uint cmp( const Node &n ) const;

  // Operation appears to be iteratively computed (such as an induction variable)
  // It is possible for this operation to return false for a loop-varying
  // value, if it appears (by local graph inspection) to be computed by a simple conditional.
  bool is_iteratively_computed();

  // Determine if a node is a counted loop induction variable.
  // The method is defined in loopnode.cpp.
  const Node* is_loop_iv() const;

  // Return a node with opcode "opc" and same inputs as "this" if one can
  // be found; otherwise return NULL.
  Node* find_similar(int opc);

  // Return the unique control out if only one. NULL if none or more than one.
  Node* unique_ctrl_out();

//----------------- Code Generation

  // Ideal register class for Matching. Zero means unmatched instruction
  // (these are cloned instead of converted to machine nodes).
  virtual uint ideal_reg() const;

  static const uint NotAMachineReg;   // must be > max. machine register

  // Do we Match on this edge index or not? Generally false for Control
  // and true for everything else. Weird for calls & returns.
  virtual uint match_edge(uint idx) const;

  // Register class output is returned in
  virtual const RegMask &out_RegMask() const;
  // Register class input is expected in
  virtual const RegMask &in_RegMask(uint) const;
  // Should we clone rather than spill this instruction?
  bool rematerialize() const;

  // Return JVM State Object if this Node carries debug info, or NULL otherwise
  virtual JVMState* jvms() const;

  // Print as assembly
  virtual void format( PhaseRegAlloc *, outputStream* st = tty ) const;
  // Emit bytes starting at parameter 'ptr'
  // Bump 'ptr' by the number of output bytes
  virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
  // Size of instruction in bytes
  virtual uint size(PhaseRegAlloc *ra_) const;

  // Convenience function to extract an integer constant from a node.
  // If it is not an integer constant (either Con, CastII, or Mach),
  // return value_if_unknown.
  jint find_int_con(jint value_if_unknown) const {
    const TypeInt* t = find_int_type();
    return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
  }
  // Return the constant, knowing it is an integer constant already
  jint get_int() const {
    const TypeInt* t = find_int_type();
    guarantee(t != NULL, "must be con");
    return t->get_con();
  }
  // Here's where the work is done. Can produce non-constant int types too.
  const TypeInt* find_int_type() const;

  // Same thing for long (and intptr_t, via type.hpp):
  jlong get_long() const {
    const TypeLong* t = find_long_type();
    guarantee(t != NULL, "must be con");
    return t->get_con();
  }
  jlong find_long_con(jint value_if_unknown) const {
    const TypeLong* t = find_long_type();
    return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
  }
  const TypeLong* find_long_type() const;

  const TypePtr* get_ptr_type() const;

  // These guys are called by code generated by ADLC:
  intptr_t get_ptr() const;
  intptr_t get_narrowcon() const;
  jdouble getd() const;
  jfloat getf() const;
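
  // Usage sketch for the constant extractors ('n' and the index are
  // illustrative):
  //   jint scale = n->in(2)->find_int_con(-1);
  //   if (scale == -1) { ... }   // in(2) was not a known integer constant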

  // Nodes which are pinned into basic blocks
  virtual bool pinned() const { return false; }

  // Nodes which use memory without consuming it, hence need antidependences
  // More specifically, needs_anti_dependence_check returns true iff the node
  // (a) does a load, and (b) does not perform a store (except perhaps to a
  // stack slot or some other unaliased location).
  bool needs_anti_dependence_check() const;

  // Return which operand this instruction may cisc-spill. In other words,
  // return operand position that can convert from reg to memory access
  virtual int cisc_operand() const { return AdlcVMDeps::Not_cisc_spillable; }
  bool is_cisc_alternate() const { return (_flags & Flag_is_cisc_alternate) != 0; }

//----------------- Graph walking
public:
  // Walk and apply member functions recursively.
  // Supplied (this) pointer is root.
  void walk(NFunc pre, NFunc post, void *env);
  static void nop(Node &, void*); // Dummy empty function
  static void packregion( Node &n, void* );
private:
  void walk_(NFunc pre, NFunc post, void *env, VectorSet &visited);

//----------------- Printing, etc
public:
#ifndef PRODUCT
  Node* find(int idx) const;         // Search the graph for the given idx.
  Node* find_ctrl(int idx) const;    // Search control ancestors for the given idx.
  void dump() const { dump("\n"); }  // Print this node.
  void dump(const char* suffix, outputStream *st = tty) const; // Print this node.
  void dump(int depth) const;        // Print this node, recursively to depth d
  void dump_ctrl(int depth) const;   // Print control nodes, to depth d
  virtual void dump_req(outputStream *st = tty) const;  // Print required-edge info
  virtual void dump_prec(outputStream *st = tty) const; // Print precedence-edge info
  virtual void dump_out(outputStream *st = tty) const;  // Print the output edge info
  virtual void dump_spec(outputStream *st) const {};    // Print per-node info
  void verify_edges(Unique_Node_List &visited); // Verify bi-directional edges
  void verify() const;               // Check Def-Use info for my subgraph
  static void verify_recur(const Node *n, int verify_depth, VectorSet &old_space, VectorSet &new_space);

  // This call defines a class-unique string used to identify class instances
  virtual const char *Name() const;

  void dump_format(PhaseRegAlloc *ra) const; // debug access to MachNode::format(...)
  // RegMask Print Functions
  void dump_in_regmask(int idx) { in_RegMask(idx).dump(); }
  void dump_out_regmask() { out_RegMask().dump(); }
  static bool in_dump() { return Compile::current()->_in_dump_cnt > 0; }
  void fast_dump() const {
    tty->print("%4d: %-17s", _idx, Name());
    for (uint i = 0; i < len(); i++)
      if (in(i))
        tty->print(" %4d", in(i)->_idx);
      else
        tty->print(" NULL");
    tty->print("\n");
  }
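
  // Debugging sketch (non-product builds): from a debugger, a node can be
  // located by its _idx from any node in the same graph and printed with
  // its inputs; the index 123 and depth 2 here are illustrative:
  //   n->find(123)->dump(2);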
#endif
#ifdef ASSERT
  void verify_construction();
  bool verify_jvms(const JVMState* jvms) const;
  int  _debug_idx;                     // Unique value assigned to every node.
  int   debug_idx() const              { return _debug_idx; }
  void  set_debug_idx( int debug_idx ) { _debug_idx = debug_idx; }

  Node* _debug_orig;                   // Original version of this, if any.
  Node*  debug_orig() const            { return _debug_orig; }
  void   set_debug_orig(Node* orig);   // _debug_orig = orig

  int   _hash_lock;                    // Barrier to modifications of nodes in the hash table
  void  enter_hash_lock() { ++_hash_lock; assert(_hash_lock < 99, "in too many hash tables?"); }
  void   exit_hash_lock() { --_hash_lock; assert(_hash_lock >= 0, "mispaired hash locks"); }

  static void init_NodeProperty();

#if OPTO_DU_ITERATOR_ASSERT
  const Node* _last_del;               // The last deleted node.
  uint        _del_tick;               // Bumped when a deletion happens.
#endif
#endif
};

//-----------------------------------------------------------------------------
// Iterators over DU info, and associated Node functions.

#if OPTO_DU_ITERATOR_ASSERT

// Common code for assertion checking on DU iterators.
class DUIterator_Common VALUE_OBJ_CLASS_SPEC {
#ifdef ASSERT
 protected:
  bool         _vdui;      // cached value of VerifyDUIterators
  const Node*  _node;      // the node containing the _out array
  uint         _outcnt;    // cached node->_outcnt
  uint         _del_tick;  // cached node->_del_tick
  Node*        _last;      // last value produced by the iterator

  void sample(const Node* node);   // used by c'tor to set up for verifies
  void verify(const Node* node, bool at_end_ok = false);
  void verify_resync();
  void reset(const DUIterator_Common& that);

// The VDUI_ONLY macro protects code conditionalized on VerifyDUIterators
  #define I_VDUI_ONLY(i,x) { if ((i)._vdui) { x; } }
#else
  #define I_VDUI_ONLY(i,x) { }
#endif //ASSERT
};

#define VDUI_ONLY(x) I_VDUI_ONLY(*this, x)

// Default DU iterator. Allows appends onto the out array.
// Allows deletion from the out array only at the current point.
// Usage:
//  for (DUIterator i = x->outs(); x->has_out(i); i++) {
//    Node* y = x->out(i);
//    ...
//  }
// Compiles in product mode to an unsigned integer index, which indexes
// into a repeatedly reloaded base pointer of x->_out. The loop predicate
// also reloads x->_outcnt. If you delete, you must perform "--i" just
// before continuing the loop. You must delete only the last-produced
// edge. You must delete only a single copy of the last-produced edge,
// or else you must delete all copies at once (the first time the edge
// is produced by the iterator).
class DUIterator : public DUIterator_Common {
  friend class Node;

  // This is the index which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI index is done to this index. All other fields in
  // this class are used only for assertion checking.
  uint         _idx;

#ifdef ASSERT
  uint         _refresh_tick;    // Records the refresh activity.

  void sample(const Node* node); // Initialize _refresh_tick etc.
  void verify(const Node* node, bool at_end_ok = false);
  void verify_increment();       // Verify an increment operation.
  void verify_resync();          // Verify that we can back up over a deletion.
  void verify_finish();          // Verify that the loop terminated properly.
  void refresh();                // Resample verification info.
  void reset(const DUIterator& that);  // Resample after assignment.
#endif

  DUIterator(const Node* node, int dummy_to_avoid_conversion)
    { _idx = 0;                         debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator()
    { /*initialize to garbage*/         debug_only(_vdui = false); }

  void operator++(int dummy_to_specify_postfix_op)
    { _idx++;                           VDUI_ONLY(verify_increment()); }

  void operator--()
    { VDUI_ONLY(verify_resync());       --_idx; }

  ~DUIterator()
    { VDUI_ONLY(verify_finish()); }

  void operator=(const DUIterator& that)
    { _idx = that._idx;                 debug_only(reset(that)); }
};

DUIterator Node::outs() const
  { return DUIterator(this, 0); }
DUIterator& Node::refresh_out_pos(DUIterator& i) const
  { I_VDUI_ONLY(i, i.refresh());        return i; }
bool Node::has_out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this,true));return i._idx < _outcnt; }
Node*    Node::out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this));     return debug_only(i._last=) _out[i._idx]; }
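
// Deletion sketch for the default iterator: only the last-produced edge may
// be deleted, and "--i" must resync the iterator before the loop continues
// ('wants_removal' is a hypothetical predicate):
//   for (DUIterator i = x->outs(); x->has_out(i); i++) {
//     Node* y = x->out(i);
//     if (wants_removal(y)) {
//       y->del_req(y->find_edge(x));  // drops exactly one x->y out-edge
//       --i;                          // back up over the deletion
//     }
//   }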

// Faster DU iterator. Disallows insertions into the out array.
// Allows deletion from the out array only at the current point.
// Usage:
//  for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
//    Node* y = x->fast_out(i);
//    ...
//  }
// Compiles in product mode to raw Node** pointer arithmetic, with
// no reloading of pointers from the original node x. If you delete,
// you must perform "--i; --imax" just before continuing the loop.
// If you delete multiple copies of the same edge, you must decrement
// imax, but not i, multiple times: "--i, imax -= num_edges".
class DUIterator_Fast : public DUIterator_Common {
  friend class Node;
  friend class DUIterator_Last;

  // This is the pointer which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI pointer is done to this pointer. All other fields in
  // this class are used only for assertion checking.
  Node**       _outp;

#ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_resync();
  void verify_relimit(uint n);
  void reset(const DUIterator_Fast& that);
#endif

  // Note: offset must be signed, since -1 is sometimes passed
  DUIterator_Fast(const Node* node, ptrdiff_t offset)
    { _outp = node->_out + offset;      debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator_Fast()
    { /*initialize to garbage*/         debug_only(_vdui = false); }

  void operator++(int dummy_to_specify_postfix_op)
    { _outp++;                          VDUI_ONLY(verify(_node, true)); }

  void operator--()
    { VDUI_ONLY(verify_resync());       --_outp; }

  void operator-=(uint n)   // applied to the limit only
    { _outp -= n;                       VDUI_ONLY(verify_relimit(n)); }

  bool operator<(DUIterator_Fast& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp < limit._outp;
  }

  void operator=(const DUIterator_Fast& that)
    { _outp = that._outp;               debug_only(reset(that)); }
};

DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const {
  // Assign a limit pointer to the reference argument:
  imax = DUIterator_Fast(this, (ptrdiff_t)_outcnt);
  // Return the base pointer:
  return DUIterator_Fast(this, 0);
}
Node* Node::fast_out(DUIterator_Fast& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}
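
// Deletion sketch for the fast iterator, replacing every use of 'n' by 'm'
// (both names illustrative); replace_edge() reports how many of n's
// out-edges disappeared, which drives the "--i; imax -= nb" resync rule:
//   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
//     Node* u = n->fast_out(i);
//     int nb = u->replace_edge(n, m);
//     if (nb > 0) { --i; imax -= nb; }
//   }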

// Faster DU iterator. Requires each successive edge to be removed.
// Does not allow insertion of any edges.
// Usage:
//  for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= num_edges) {
//    Node* y = x->last_out(i);
//    ...
//  }
// Compiles in product mode to raw Node** pointer arithmetic, with
// no reloading of pointers from the original node x.
class DUIterator_Last : private DUIterator_Fast {
  friend class Node;

#ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_step(uint num_edges);
#endif

  // Note: offset must be signed, since -1 is sometimes passed
  DUIterator_Last(const Node* node, ptrdiff_t offset)
    : DUIterator_Fast(node, offset) { }

  void operator++(int dummy_to_specify_postfix_op) {} // do not use
  void operator<(int)                              {} // do not use

 public:
  DUIterator_Last() { }
  // initialize to garbage

  void operator--()
    { _outp--;              VDUI_ONLY(verify_step(1)); }

  void operator-=(uint n)
    { _outp -= n;           VDUI_ONLY(verify_step(n)); }

  bool operator>=(DUIterator_Last& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp >= limit._outp;
  }

  void operator=(const DUIterator_Last& that)
    { DUIterator_Fast::operator=(that); }
};

DUIterator_Last Node::last_outs(DUIterator_Last& imin) const {
  // Assign a limit pointer to the reference argument:
  imin = DUIterator_Last(this, 0);
  // Return the initial pointer:
  return DUIterator_Last(this, (ptrdiff_t)_outcnt - 1);
}
Node* Node::last_out(DUIterator_Last& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}

#endif //OPTO_DU_ITERATOR_ASSERT

#undef I_VDUI_ONLY
#undef VDUI_ONLY

// An Iterator that truly follows the iterator pattern. Doesn't
// support deletion but could be made to.
//
//   for (SimpleDUIterator i(n); i.has_next(); i.next()) {
//     Node* m = i.get();
//     ...
//   }
class SimpleDUIterator : public StackObj {
 private:
  Node* node;
  DUIterator_Fast i;
  DUIterator_Fast imax;
 public:
  SimpleDUIterator(Node* n): node(n), i(n->fast_outs(imax)) {}
  bool has_next() { return i < imax; }
  void next() { i++; }
  Node* get() { return node->fast_out(i); }
};


//-----------------------------------------------------------------------------
// Map dense integer indices to Nodes. Uses classic doubling-array trick.
// Abstractly provides an infinite array of Node*'s, initialized to NULL.
// Note that the constructor just zeros things, and since I use Arena
// allocation I do not need a destructor to reclaim storage.
class Node_Array : public ResourceObj {
  friend class VMStructs;
protected:
  Arena *_a;                    // Arena to allocate in
  uint   _max;
  Node **_nodes;
  void   grow( uint i );        // Grow array node to fit
public:
  Node_Array(Arena *a) : _a(a), _max(OptoNodeListSize) {
    _nodes = NEW_ARENA_ARRAY( a, Node *, OptoNodeListSize );
    for( int i = 0; i < OptoNodeListSize; i++ ) {
      _nodes[i] = NULL;
    }
  }

  Node_Array(Node_Array *na) : _a(na->_a), _max(na->_max), _nodes(na->_nodes) {}
  Node *operator[] ( uint i ) const // Lookup, or NULL for not mapped
  { return (i<_max) ? _nodes[i] : (Node*)NULL; }
  Node *at( uint i ) const { assert(i<_max,"oob"); return _nodes[i]; }
  Node **adr() { return _nodes; }
  // Extend the mapping: index i maps to Node *n.
  void map( uint i, Node *n ) { if( i>=_max ) grow(i); _nodes[i] = n; }
  void insert( uint i, Node *n );
  void remove( uint i );        // Remove, preserving order
  void sort( C_sort_func_t func);
  void reset( Arena *new_a );   // Zap mapping to empty; reclaim storage
  void clear();                 // Set all entries to NULL, keep storage
  uint Size() const { return _max; }
  void dump() const;
};
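
// Usage sketch for Node_Array: a side array keyed by node indexes, here
// mapping each node to a hypothetical clone:
//   Node_Array map(Thread::current()->resource_area());
//   map.map(n->_idx, clone_of_n);   // grows on demand
//   Node* c = map[n->_idx];         // NULL if the index was never mapped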

class Node_List : public Node_Array {
  friend class VMStructs;
  uint _cnt;
public:
  Node_List() : Node_Array(Thread::current()->resource_area()), _cnt(0) {}
  Node_List(Arena *a) : Node_Array(a), _cnt(0) {}
  bool contains(const Node* n) const {
    for (uint e = 0; e < size(); e++) {
      if (at(e) == n) return true;
    }
    return false;
  }
  void insert( uint i, Node *n ) { Node_Array::insert(i,n); _cnt++; }
  void remove( uint i ) { Node_Array::remove(i); _cnt--; }
  void push( Node *b ) { map(_cnt++,b); }
  void yank( Node *n );         // Find and remove
  Node *pop() { return _nodes[--_cnt]; }
  Node *rpop() { Node *b = _nodes[0]; _nodes[0]=_nodes[--_cnt]; return b;}
  void clear() { _cnt = 0; Node_Array::clear(); } // retain storage
  uint size() const { return _cnt; }
  void dump() const;
  void dump_simple() const;
};

//------------------------------Unique_Node_List------------------------------
class Unique_Node_List : public Node_List {
  friend class VMStructs;
  VectorSet _in_worklist;
  uint _clock_index;            // Index in list where to pop from next
public:
  Unique_Node_List() : Node_List(), _in_worklist(Thread::current()->resource_area()), _clock_index(0) {}
  Unique_Node_List(Arena *a) : Node_List(a), _in_worklist(a), _clock_index(0) {}

  void remove( Node *n );
  bool member( Node *n ) { return _in_worklist.test(n->_idx) != 0; }
  VectorSet &member_set(){ return _in_worklist; }

  void push( Node *b ) {
    if( !_in_worklist.test_set(b->_idx) )
      Node_List::push(b);
  }
  Node *pop() {
    if( _clock_index >= size() ) _clock_index = 0;
    Node *b = at(_clock_index);
    map( _clock_index, Node_List::pop());
    if (size() != 0) _clock_index++; // Always start from 0
    _in_worklist >>= b->_idx;
    return b;
  }
  Node *remove( uint i ) {
    Node *b = Node_List::at(i);
    _in_worklist >>= b->_idx;
    map(i,Node_List::pop());
    return b;
  }
  void yank( Node *n ) { _in_worklist >>= n->_idx; Node_List::yank(n); }
  void clear() {
    _in_worklist.Clear();       // Discards storage but grows automatically
    Node_List::clear();
    _clock_index = 0;
  }

  // Used after parsing to remove useless nodes before Iterative GVN
  void remove_useless_nodes(VectorSet &useful);

#ifndef PRODUCT
  void print_set() const { _in_worklist.print(); }
#endif
};
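
// Usage sketch for Unique_Node_List: the standard worklist idiom. push()
// ignores nodes already in the set, so each reachable node is visited once:
//   Unique_Node_List worklist;
//   worklist.push(root);
//   for (uint i = 0; i < worklist.size(); i++) {  // size() grows as we push
//     Node* n = worklist.at(i);
//     for (uint j = 0; j < n->req(); j++) {
//       if (n->in(j) != NULL)  worklist.push(n->in(j));
//     }
//   }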

// Inline definition of Compile::record_for_igvn must be deferred to this point.
inline void Compile::record_for_igvn(Node* n) {
  _for_igvn->push(n);
}

//------------------------------Node_Stack-------------------------------------
class Node_Stack {
  friend class VMStructs;
protected:
  struct INode {
    Node *node; // Processed node
    uint  indx; // Index of next node's child
  };
  INode *_inode_top; // tos, stack grows up
  INode *_inode_max; // End of _inodes == _inodes + _max
  INode *_inodes;    // Array storage for the stack
  Arena *_a;         // Arena to allocate in
  void grow();
public:
  Node_Stack(int size) {
    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
    _a = Thread::current()->resource_area();
    _inodes = NEW_ARENA_ARRAY( _a, INode, max );
    _inode_max = _inodes + max;
    _inode_top = _inodes - 1; // stack is empty
  }

  Node_Stack(Arena *a, int size) : _a(a) {
    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
    _inodes = NEW_ARENA_ARRAY( _a, INode, max );
    _inode_max = _inodes + max;
    _inode_top = _inodes - 1; // stack is empty
  }

  void pop() {
    assert(_inode_top >= _inodes, "node stack underflow");
    --_inode_top;
  }
  void push(Node *n, uint i) {
    ++_inode_top;
    if (_inode_top >= _inode_max) grow();
    INode *top = _inode_top; // optimization
    top->node = n;
    top->indx = i;
  }
  Node *node() const {
    return _inode_top->node;
  }
  Node* node_at(uint i) const {
    assert(_inodes + i <= _inode_top, "in range");
    return _inodes[i].node;
  }
  uint index() const {
    return _inode_top->indx;
  }
  uint index_at(uint i) const {
    assert(_inodes + i <= _inode_top, "in range");
    return _inodes[i].indx;
  }
  void set_node(Node *n) {
    _inode_top->node = n;
  }
  void set_index(uint i) {
    _inode_top->indx = i;
  }
  uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes, sizeof(INode)); } // Max size
  uint size() const { return (uint)pointer_delta((_inode_top+1), _inodes, sizeof(INode)); } // Current size
  bool is_nonempty() const { return (_inode_top >= _inodes); }
  bool is_empty() const { return (_inode_top < _inodes); }
  void clear() { _inode_top = _inodes - 1; } // retain storage

  // Node_Stack is used to map nodes.
  Node* find(uint idx) const;
};
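
// Usage sketch for Node_Stack: an iterative depth-first walk over required
// inputs. There is no visited set here, so this sketch suits only walks that
// tolerate revisits (or acyclic subgraphs):
//   Node_Stack stack(16);
//   stack.push(root, 0);
//   while (stack.is_nonempty()) {
//     Node* n = stack.node();
//     uint  i = stack.index();
//     if (i < n->req()) {
//       stack.set_index(i + 1);               // advance past this child
//       if (n->in(i) != NULL)  stack.push(n->in(i), 0);
//     } else {
//       stack.pop();                          // all inputs of n are done
//     }
//   }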

//-----------------------------Node_Notes--------------------------------------
// Debugging or profiling annotations loosely and sparsely associated
// with some nodes. See Compile::node_notes_at for the accessor.
class Node_Notes VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
  JVMState* _jvms;

public:
  Node_Notes(JVMState* jvms = NULL) {
    _jvms = jvms;
  }

  JVMState* jvms()            { return _jvms; }
  void  set_jvms(JVMState* x) {        _jvms = x; }

  // True if there is nothing here.
  bool is_clear() {
    return (_jvms == NULL);
  }

  // Make there be nothing here.
  void clear() {
    _jvms = NULL;
  }

  // Make a new, clean node notes.
  static Node_Notes* make(Compile* C) {
    Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
    nn->clear();
    return nn;
  }

  Node_Notes* clone(Compile* C) {
    Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
    (*nn) = (*this);
    return nn;
  }

  // Absorb any information from source.
  bool update_from(Node_Notes* source) {
    bool changed = false;
    if (source != NULL) {
      if (source->jvms() != NULL) {
        set_jvms(source->jvms());
        changed = true;
      }
    }
    return changed;
  }
};

// Inlined accessors for Compile::node_notes that require the preceding class:
inline Node_Notes*
Compile::locate_node_notes(GrowableArray<Node_Notes*>* arr,
                           int idx, bool can_grow) {
  assert(idx >= 0, "oob");
  int block_idx = (idx >> _log2_node_notes_block_size);
  int grow_by = (block_idx - (arr == NULL? 0: arr->length()));
  if (grow_by >= 0) {
    if (!can_grow) return NULL;
    grow_node_notes(arr, grow_by + 1);
  }
  // (Every element of arr is a sub-array of length _node_notes_block_size.)
  return arr->at(block_idx) + (idx & (_node_notes_block_size-1));
}

inline bool
Compile::set_node_notes_at(int idx, Node_Notes* value) {
  if (value == NULL || value->is_clear())
    return false;  // nothing to write => write nothing
  Node_Notes* loc = locate_node_notes(_node_note_array, idx, true);
  assert(loc != NULL, "");
  return loc->update_from(value);
}


//------------------------------TypeNode---------------------------------------
// Node with a Type constant.
class TypeNode : public Node {
protected:
  virtual uint hash() const;    // Check the type
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
  const Type* const _type;
public:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;  // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };
  TypeNode( const Type *t, uint required ) : Node(required), _type(t) {
    init_class_id(Class_Type);
  }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual const Type *bottom_type() const;
  virtual       uint  ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

#endif // SHARE_VM_OPTO_NODE_HPP