1 /* 2 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_OPTO_NODE_HPP 26 #define SHARE_OPTO_NODE_HPP 27 28 #include "libadt/vectset.hpp" 29 #include "opto/compile.hpp" 30 #include "opto/type.hpp" 31 32 // Portions of code courtesy of Clifford Click 33 34 // Optimization - Graph Style 35 36 37 class AbstractLockNode; 38 class AddNode; 39 class AddPNode; 40 class AliasInfo; 41 class AllocateArrayNode; 42 class AllocateNode; 43 class ArrayCopyNode; 44 class Block; 45 class BoolNode; 46 class BoxLockNode; 47 class CMoveNode; 48 class CallDynamicJavaNode; 49 class CallJavaNode; 50 class CallLeafNode; 51 class CallNode; 52 class CallRuntimeNode; 53 class CallStaticJavaNode; 54 class CastIINode; 55 class CatchNode; 56 class CatchProjNode; 57 class CheckCastPPNode; 58 class ClearArrayNode; 59 class CmpNode; 60 class CodeBuffer; 61 class ConstraintCastNode; 62 class ConNode; 63 class CompareAndSwapNode; 64 class CompareAndExchangeNode; 65 class CountedLoopNode; 66 class CountedLoopEndNode; 67 class DecodeNarrowPtrNode; 68 class DecodeNNode; 69 class DecodeNKlassNode; 70 class EncodeNarrowPtrNode; 71 class EncodePNode; 72 class EncodePKlassNode; 73 class FastLockNode; 74 class FastUnlockNode; 75 class HaltNode; 76 class IfNode; 77 class IfProjNode; 78 class IfFalseNode; 79 class IfTrueNode; 80 class InitializeNode; 81 class JVMState; 82 class JumpNode; 83 class JumpProjNode; 84 class LoadNode; 85 class LoadStoreNode; 86 class LoadStoreConditionalNode; 87 class LockNode; 88 class LoopNode; 89 class MachBranchNode; 90 class MachCallDynamicJavaNode; 91 class MachCallJavaNode; 92 class MachCallLeafNode; 93 class MachCallNode; 94 class MachCallRuntimeNode; 95 class MachCallStaticJavaNode; 96 class MachConstantBaseNode; 97 class MachConstantNode; 98 class MachGotoNode; 99 class MachIfNode; 100 class MachJumpNode; 101 class MachNode; 102 class MachNullCheckNode; 103 class MachProjNode; 104 class MachReturnNode; 105 class MachSafePointNode; 106 class MachSpillCopyNode; 107 class MachTempNode; 108 class MachMergeNode; 109 class MachMemBarNode; 110 class Matcher; 111 class MemBarNode; 112 class MemBarStoreStoreNode; 113 class MemNode; 114 class MergeMemNode; 115 class MulNode; 116 class MultiNode; 117 class MultiBranchNode; 118 class NeverBranchNode; 119 class Opaque1Node; 120 class OuterStripMinedLoopNode; 121 class OuterStripMinedLoopEndNode; 122 class Node; 123 class Node_Array; 124 class Node_List; 125 class Node_Stack; 126 class NullCheckNode; 127 class OopMap; 128 class ParmNode; 129 class PCTableNode; 130 class PhaseCCP; 131 class PhaseGVN; 132 class PhaseIterGVN; 133 class PhaseRegAlloc; 134 class PhaseTransform; 135 class PhaseValues; 136 class PhiNode; 137 class Pipeline; 138 class ProjNode; 139 class RangeCheckNode; 140 class RegMask; 141 class RegionNode; 142 class RootNode; 143 class SafePointNode; 144 class SafePointScalarObjectNode; 145 class StartNode; 146 class State; 147 class StoreNode; 148 class SubNode; 149 class SubTypeCheckNode; 150 class Type; 151 class TypeNode; 152 class UnlockNode; 153 class VectorNode; 154 class LoadVectorNode; 155 class LoadVectorGatherNode; 156 class StoreVectorNode; 157 class StoreVectorScatterNode; 158 class VectorMaskCmpNode; 159 class VectorSet; 160 typedef void (*NFunc)(Node&,void*); 161 extern "C" { 162 typedef int (*C_sort_func_t)(const void *, const void *); 163 } 164 165 // The type of all node counts and indexes. 166 // It must hold at least 16 bits, but must also be fast to load and store. 167 // This type, if less than 32 bits, could limit the number of possible nodes. 168 // (To make this type platform-specific, move to globalDefinitions_xxx.hpp.) 169 typedef unsigned int node_idx_t; 170 171 172 #ifndef OPTO_DU_ITERATOR_ASSERT 173 #ifdef ASSERT 174 #define OPTO_DU_ITERATOR_ASSERT 1 175 #else 176 #define OPTO_DU_ITERATOR_ASSERT 0 177 #endif 178 #endif //OPTO_DU_ITERATOR_ASSERT 179 180 #if OPTO_DU_ITERATOR_ASSERT 181 class DUIterator; 182 class DUIterator_Fast; 183 class DUIterator_Last; 184 #else 185 typedef uint DUIterator; 186 typedef Node** DUIterator_Fast; 187 typedef Node** DUIterator_Last; 188 #endif 189 190 // Node Sentinel 191 #define NodeSentinel (Node*)-1 192 193 // Unknown count frequency 194 #define COUNT_UNKNOWN (-1.0f) 195 196 //------------------------------Node------------------------------------------- 197 // Nodes define actions in the program. They create values, which have types. 198 // They are both vertices in a directed graph and program primitives. Nodes 199 // are labeled; the label is the "opcode", the primitive function in the lambda 200 // calculus sense that gives meaning to the Node. Node inputs are ordered (so 201 // that "a-b" is different from "b-a"). The inputs to a Node are the inputs to 202 // the Node's function. These inputs also define a Type equation for the Node. 203 // Solving these Type equations amounts to doing dataflow analysis. 204 // Control and data are uniformly represented in the graph. Finally, Nodes 205 // have a unique dense integer index which is used to index into side arrays 206 // whenever I have phase-specific information. 207 208 class Node { 209 friend class VMStructs; 210 211 // Lots of restrictions on cloning Nodes 212 Node(const Node&); // not defined; linker error to use these 213 Node &operator=(const Node &rhs); 214 215 public: 216 friend class Compile; 217 #if OPTO_DU_ITERATOR_ASSERT 218 friend class DUIterator_Common; 219 friend class DUIterator; 220 friend class DUIterator_Fast; 221 friend class DUIterator_Last; 222 #endif 223 224 // Because Nodes come and go, I define an Arena of Node structures to pull 225 // from. This should allow fast access to node creation & deletion. This 226 // field is a local cache of a value defined in some "program fragment" for 227 // which these Nodes are just a part of. 228 229 inline void* operator new(size_t x) throw() { 230 Compile* C = Compile::current(); 231 Node* n = (Node*)C->node_arena()->Amalloc_D(x); 232 return (void*)n; 233 } 234 235 // Delete is a NOP 236 void operator delete( void *ptr ) {} 237 // Fancy destructor; eagerly attempt to reclaim Node numberings and storage 238 void destruct(); 239 240 // Create a new Node. Required is the number is of inputs required for 241 // semantic correctness. 242 Node( uint required ); 243 244 // Create a new Node with given input edges. 245 // This version requires use of the "edge-count" new. 246 // E.g. new (C,3) FooNode( C, NULL, left, right ); 247 Node( Node *n0 ); 248 Node( Node *n0, Node *n1 ); 249 Node( Node *n0, Node *n1, Node *n2 ); 250 Node( Node *n0, Node *n1, Node *n2, Node *n3 ); 251 Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4 ); 252 Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4, Node *n5 ); 253 Node( Node *n0, Node *n1, Node *n2, Node *n3, 254 Node *n4, Node *n5, Node *n6 ); 255 256 // Clone an inherited Node given only the base Node type. 257 Node* clone() const; 258 259 // Clone a Node, immediately supplying one or two new edges. 260 // The first and second arguments, if non-null, replace in(1) and in(2), 261 // respectively. 262 Node* clone_with_data_edge(Node* in1, Node* in2 = NULL) const { 263 Node* nn = clone(); 264 if (in1 != NULL) nn->set_req(1, in1); 265 if (in2 != NULL) nn->set_req(2, in2); 266 return nn; 267 } 268 269 private: 270 // Shared setup for the above constructors. 271 // Handles all interactions with Compile::current. 272 // Puts initial values in all Node fields except _idx. 273 // Returns the initial value for _idx, which cannot 274 // be initialized by assignment. 275 inline int Init(int req); 276 277 //----------------- input edge handling 278 protected: 279 friend class PhaseCFG; // Access to address of _in array elements 280 Node **_in; // Array of use-def references to Nodes 281 Node **_out; // Array of def-use references to Nodes 282 283 // Input edges are split into two categories. Required edges are required 284 // for semantic correctness; order is important and NULLs are allowed. 285 // Precedence edges are used to help determine execution order and are 286 // added, e.g., for scheduling purposes. They are unordered and not 287 // duplicated; they have no embedded NULLs. Edges from 0 to _cnt-1 288 // are required, from _cnt to _max-1 are precedence edges. 289 node_idx_t _cnt; // Total number of required Node inputs. 290 291 node_idx_t _max; // Actual length of input array. 292 293 // Output edges are an unordered list of def-use edges which exactly 294 // correspond to required input edges which point from other nodes 295 // to this one. Thus the count of the output edges is the number of 296 // users of this node. 297 node_idx_t _outcnt; // Total number of Node outputs. 298 299 node_idx_t _outmax; // Actual length of output array. 300 301 // Grow the actual input array to the next larger power-of-2 bigger than len. 302 void grow( uint len ); 303 // Grow the output array to the next larger power-of-2 bigger than len. 304 void out_grow( uint len ); 305 306 public: 307 // Each Node is assigned a unique small/dense number. This number is used 308 // to index into auxiliary arrays of data and bit vectors. 309 // The field _idx is declared constant to defend against inadvertent assignments, 310 // since it is used by clients as a naked field. However, the field's value can be 311 // changed using the set_idx() method. 312 // 313 // The PhaseRenumberLive phase renumbers nodes based on liveness information. 314 // Therefore, it updates the value of the _idx field. The parse-time _idx is 315 // preserved in _parse_idx. 316 const node_idx_t _idx; 317 DEBUG_ONLY(const node_idx_t _parse_idx;) 318 319 // Get the (read-only) number of input edges 320 uint req() const { return _cnt; } 321 uint len() const { return _max; } 322 // Get the (read-only) number of output edges 323 uint outcnt() const { return _outcnt; } 324 325 #if OPTO_DU_ITERATOR_ASSERT 326 // Iterate over the out-edges of this node. Deletions are illegal. 327 inline DUIterator outs() const; 328 // Use this when the out array might have changed to suppress asserts. 329 inline DUIterator& refresh_out_pos(DUIterator& i) const; 330 // Does the node have an out at this position? (Used for iteration.) 331 inline bool has_out(DUIterator& i) const; 332 inline Node* out(DUIterator& i) const; 333 // Iterate over the out-edges of this node. All changes are illegal. 334 inline DUIterator_Fast fast_outs(DUIterator_Fast& max) const; 335 inline Node* fast_out(DUIterator_Fast& i) const; 336 // Iterate over the out-edges of this node, deleting one at a time. 337 inline DUIterator_Last last_outs(DUIterator_Last& min) const; 338 inline Node* last_out(DUIterator_Last& i) const; 339 // The inline bodies of all these methods are after the iterator definitions. 340 #else 341 // Iterate over the out-edges of this node. Deletions are illegal. 342 // This iteration uses integral indexes, to decouple from array reallocations. 343 DUIterator outs() const { return 0; } 344 // Use this when the out array might have changed to suppress asserts. 345 DUIterator refresh_out_pos(DUIterator i) const { return i; } 346 347 // Reference to the i'th output Node. Error if out of bounds. 348 Node* out(DUIterator i) const { assert(i < _outcnt, "oob"); return _out[i]; } 349 // Does the node have an out at this position? (Used for iteration.) 350 bool has_out(DUIterator i) const { return i < _outcnt; } 351 352 // Iterate over the out-edges of this node. All changes are illegal. 353 // This iteration uses a pointer internal to the out array. 354 DUIterator_Fast fast_outs(DUIterator_Fast& max) const { 355 Node** out = _out; 356 // Assign a limit pointer to the reference argument: 357 max = out + (ptrdiff_t)_outcnt; 358 // Return the base pointer: 359 return out; 360 } 361 Node* fast_out(DUIterator_Fast i) const { return *i; } 362 // Iterate over the out-edges of this node, deleting one at a time. 363 // This iteration uses a pointer internal to the out array. 364 DUIterator_Last last_outs(DUIterator_Last& min) const { 365 Node** out = _out; 366 // Assign a limit pointer to the reference argument: 367 min = out; 368 // Return the pointer to the start of the iteration: 369 return out + (ptrdiff_t)_outcnt - 1; 370 } 371 Node* last_out(DUIterator_Last i) const { return *i; } 372 #endif 373 374 // Reference to the i'th input Node. Error if out of bounds. 375 Node* in(uint i) const { assert(i < _max, "oob: i=%d, _max=%d", i, _max); return _in[i]; } 376 // Reference to the i'th input Node. NULL if out of bounds. 377 Node* lookup(uint i) const { return ((i < _max) ? _in[i] : NULL); } 378 // Reference to the i'th output Node. Error if out of bounds. 379 // Use this accessor sparingly. We are going trying to use iterators instead. 380 Node* raw_out(uint i) const { assert(i < _outcnt,"oob"); return _out[i]; } 381 // Return the unique out edge. 382 Node* unique_out() const { assert(_outcnt==1,"not unique"); return _out[0]; } 383 // Delete out edge at position 'i' by moving last out edge to position 'i' 384 void raw_del_out(uint i) { 385 assert(i < _outcnt,"oob"); 386 assert(_outcnt > 0,"oob"); 387 #if OPTO_DU_ITERATOR_ASSERT 388 // Record that a change happened here. 389 debug_only(_last_del = _out[i]; ++_del_tick); 390 #endif 391 _out[i] = _out[--_outcnt]; 392 // Smash the old edge so it can't be used accidentally. 393 debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef); 394 } 395 396 #ifdef ASSERT 397 bool is_dead() const; 398 #define is_not_dead(n) ((n) == NULL || !VerifyIterativeGVN || !((n)->is_dead())) 399 bool is_reachable_from_root() const; 400 #endif 401 // Check whether node has become unreachable 402 bool is_unreachable(PhaseIterGVN &igvn) const; 403 404 // Set a required input edge, also updates corresponding output edge 405 void add_req( Node *n ); // Append a NEW required input 406 void add_req( Node *n0, Node *n1 ) { 407 add_req(n0); add_req(n1); } 408 void add_req( Node *n0, Node *n1, Node *n2 ) { 409 add_req(n0); add_req(n1); add_req(n2); } 410 void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n). 411 void del_req( uint idx ); // Delete required edge & compact 412 void del_req_ordered( uint idx ); // Delete required edge & compact with preserved order 413 void ins_req( uint i, Node *n ); // Insert a NEW required input 414 void set_req( uint i, Node *n ) { 415 assert( is_not_dead(n), "can not use dead node"); 416 assert( i < _cnt, "oob: i=%d, _cnt=%d", i, _cnt); 417 assert( !VerifyHashTableKeys || _hash_lock == 0, 418 "remove node from hash table before modifying it"); 419 Node** p = &_in[i]; // cache this._in, across the del_out call 420 if (*p != NULL) (*p)->del_out((Node *)this); 421 (*p) = n; 422 if (n != NULL) n->add_out((Node *)this); 423 Compile::current()->record_modified_node(this); 424 } 425 // Light version of set_req() to init inputs after node creation. 426 void init_req( uint i, Node *n ) { 427 assert( i == 0 && this == n || 428 is_not_dead(n), "can not use dead node"); 429 assert( i < _cnt, "oob"); 430 assert( !VerifyHashTableKeys || _hash_lock == 0, 431 "remove node from hash table before modifying it"); 432 assert( _in[i] == NULL, "sanity"); 433 _in[i] = n; 434 if (n != NULL) n->add_out((Node *)this); 435 Compile::current()->record_modified_node(this); 436 } 437 // Find first occurrence of n among my edges: 438 int find_edge(Node* n); 439 int find_prec_edge(Node* n) { 440 for (uint i = req(); i < len(); i++) { 441 if (_in[i] == n) return i; 442 if (_in[i] == NULL) { 443 DEBUG_ONLY( while ((++i) < len()) assert(_in[i] == NULL, "Gap in prec edges!"); ) 444 break; 445 } 446 } 447 return -1; 448 } 449 int replace_edge(Node* old, Node* neww); 450 int replace_edges_in_range(Node* old, Node* neww, int start, int end); 451 // NULL out all inputs to eliminate incoming Def-Use edges. 452 // Return the number of edges between 'n' and 'this' 453 int disconnect_inputs(Node *n, Compile *c); 454 455 // Quickly, return true if and only if I am Compile::current()->top(). 456 bool is_top() const { 457 assert((this == (Node*) Compile::current()->top()) == (_out == NULL), ""); 458 return (_out == NULL); 459 } 460 // Reaffirm invariants for is_top. (Only from Compile::set_cached_top_node.) 461 void setup_is_top(); 462 463 // Strip away casting. (It is depth-limited.) 464 Node* uncast(bool keep_deps = false) const; 465 // Return whether two Nodes are equivalent, after stripping casting. 466 bool eqv_uncast(const Node* n, bool keep_deps = false) const { 467 return (this->uncast(keep_deps) == n->uncast(keep_deps)); 468 } 469 470 // Find out of current node that matches opcode. 471 Node* find_out_with(int opcode); 472 // Return true if the current node has an out that matches opcode. 473 bool has_out_with(int opcode); 474 // Return true if the current node has an out that matches any of the opcodes. 475 bool has_out_with(int opcode1, int opcode2, int opcode3, int opcode4); 476 477 private: 478 static Node* uncast_helper(const Node* n, bool keep_deps); 479 480 // Add an output edge to the end of the list 481 void add_out( Node *n ) { 482 if (is_top()) return; 483 if( _outcnt == _outmax ) out_grow(_outcnt); 484 _out[_outcnt++] = n; 485 } 486 // Delete an output edge 487 void del_out( Node *n ) { 488 if (is_top()) return; 489 Node** outp = &_out[_outcnt]; 490 // Find and remove n 491 do { 492 assert(outp > _out, "Missing Def-Use edge"); 493 } while (*--outp != n); 494 *outp = _out[--_outcnt]; 495 // Smash the old edge so it can't be used accidentally. 496 debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef); 497 // Record that a change happened here. 498 #if OPTO_DU_ITERATOR_ASSERT 499 debug_only(_last_del = n; ++_del_tick); 500 #endif 501 } 502 // Close gap after removing edge. 503 void close_prec_gap_at(uint gap) { 504 assert(_cnt <= gap && gap < _max, "no valid prec edge"); 505 uint i = gap; 506 Node *last = NULL; 507 for (; i < _max-1; ++i) { 508 Node *next = _in[i+1]; 509 if (next == NULL) break; 510 last = next; 511 } 512 _in[gap] = last; // Move last slot to empty one. 513 _in[i] = NULL; // NULL out last slot. 514 } 515 516 public: 517 // Globally replace this node by a given new node, updating all uses. 518 void replace_by(Node* new_node); 519 // Globally replace this node by a given new node, updating all uses 520 // and cutting input edges of old node. 521 void subsume_by(Node* new_node, Compile* c) { 522 replace_by(new_node); 523 disconnect_inputs(NULL, c); 524 } 525 void set_req_X( uint i, Node *n, PhaseIterGVN *igvn ); 526 // Find the one non-null required input. RegionNode only 527 Node *nonnull_req() const; 528 // Add or remove precedence edges 529 void add_prec( Node *n ); 530 void rm_prec( uint i ); 531 532 // Note: prec(i) will not necessarily point to n if edge already exists. 533 void set_prec( uint i, Node *n ) { 534 assert(i < _max, "oob: i=%d, _max=%d", i, _max); 535 assert(is_not_dead(n), "can not use dead node"); 536 assert(i >= _cnt, "not a precedence edge"); 537 // Avoid spec violation: duplicated prec edge. 538 if (_in[i] == n) return; 539 if (n == NULL || find_prec_edge(n) != -1) { 540 rm_prec(i); 541 return; 542 } 543 if (_in[i] != NULL) _in[i]->del_out((Node *)this); 544 _in[i] = n; 545 if (n != NULL) n->add_out((Node *)this); 546 } 547 548 // Set this node's index, used by cisc_version to replace current node 549 void set_idx(uint new_idx) { 550 const node_idx_t* ref = &_idx; 551 *(node_idx_t*)ref = new_idx; 552 } 553 // Swap input edge order. (Edge indexes i1 and i2 are usually 1 and 2.) 554 void swap_edges(uint i1, uint i2) { 555 debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH); 556 // Def-Use info is unchanged 557 Node* n1 = in(i1); 558 Node* n2 = in(i2); 559 _in[i1] = n2; 560 _in[i2] = n1; 561 // If this node is in the hash table, make sure it doesn't need a rehash. 562 assert(check_hash == NO_HASH || check_hash == hash(), "edge swap must preserve hash code"); 563 } 564 565 // Iterators over input Nodes for a Node X are written as: 566 // for( i = 0; i < X.req(); i++ ) ... X[i] ... 567 // NOTE: Required edges can contain embedded NULL pointers. 568 569 //----------------- Other Node Properties 570 571 // Generate class IDs for (some) ideal nodes so that it is possible to determine 572 // the type of a node using a non-virtual method call (the method is_<Node>() below). 573 // 574 // A class ID of an ideal node is a set of bits. In a class ID, a single bit determines 575 // the type of the node the ID represents; another subset of an ID's bits are reserved 576 // for the superclasses of the node represented by the ID. 577 // 578 // By design, if A is a supertype of B, A.is_B() returns true and B.is_A() 579 // returns false. A.is_A() returns true. 580 // 581 // If two classes, A and B, have the same superclass, a different bit of A's class id 582 // is reserved for A's type than for B's type. That bit is specified by the third 583 // parameter in the macro DEFINE_CLASS_ID. 584 // 585 // By convention, classes with deeper hierarchy are declared first. Moreover, 586 // classes with the same hierarchy depth are sorted by usage frequency. 587 // 588 // The query method masks the bits to cut off bits of subclasses and then compares 589 // the result with the class id (see the macro DEFINE_CLASS_QUERY below). 590 // 591 // Class_MachCall=30, ClassMask_MachCall=31 592 // 12 8 4 0 593 // 0 0 0 0 0 0 0 0 1 1 1 1 0 594 // | | | | 595 // | | | Bit_Mach=2 596 // | | Bit_MachReturn=4 597 // | Bit_MachSafePoint=8 598 // Bit_MachCall=16 599 // 600 // Class_CountedLoop=56, ClassMask_CountedLoop=63 601 // 12 8 4 0 602 // 0 0 0 0 0 0 0 1 1 1 0 0 0 603 // | | | 604 // | | Bit_Region=8 605 // | Bit_Loop=16 606 // Bit_CountedLoop=32 607 608 #define DEFINE_CLASS_ID(cl, supcl, subn) \ 609 Bit_##cl = (Class_##supcl == 0) ? 1 << subn : (Bit_##supcl) << (1 + subn) , \ 610 Class_##cl = Class_##supcl + Bit_##cl , \ 611 ClassMask_##cl = ((Bit_##cl << 1) - 1) , 612 613 // This enum is used only for C2 ideal and mach nodes with is_<node>() methods 614 // so that it's values fits into 16 bits. 615 enum NodeClasses { 616 Bit_Node = 0x00000000, 617 Class_Node = 0x00000000, 618 ClassMask_Node = 0xFFFFFFFF, 619 620 DEFINE_CLASS_ID(Multi, Node, 0) 621 DEFINE_CLASS_ID(SafePoint, Multi, 0) 622 DEFINE_CLASS_ID(Call, SafePoint, 0) 623 DEFINE_CLASS_ID(CallJava, Call, 0) 624 DEFINE_CLASS_ID(CallStaticJava, CallJava, 0) 625 DEFINE_CLASS_ID(CallDynamicJava, CallJava, 1) 626 DEFINE_CLASS_ID(CallRuntime, Call, 1) 627 DEFINE_CLASS_ID(CallLeaf, CallRuntime, 0) 628 DEFINE_CLASS_ID(Allocate, Call, 2) 629 DEFINE_CLASS_ID(AllocateArray, Allocate, 0) 630 DEFINE_CLASS_ID(AbstractLock, Call, 3) 631 DEFINE_CLASS_ID(Lock, AbstractLock, 0) 632 DEFINE_CLASS_ID(Unlock, AbstractLock, 1) 633 DEFINE_CLASS_ID(ArrayCopy, Call, 4) 634 DEFINE_CLASS_ID(MultiBranch, Multi, 1) 635 DEFINE_CLASS_ID(PCTable, MultiBranch, 0) 636 DEFINE_CLASS_ID(Catch, PCTable, 0) 637 DEFINE_CLASS_ID(Jump, PCTable, 1) 638 DEFINE_CLASS_ID(If, MultiBranch, 1) 639 DEFINE_CLASS_ID(CountedLoopEnd, If, 0) 640 DEFINE_CLASS_ID(RangeCheck, If, 1) 641 DEFINE_CLASS_ID(OuterStripMinedLoopEnd, If, 2) 642 DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2) 643 DEFINE_CLASS_ID(Start, Multi, 2) 644 DEFINE_CLASS_ID(MemBar, Multi, 3) 645 DEFINE_CLASS_ID(Initialize, MemBar, 0) 646 DEFINE_CLASS_ID(MemBarStoreStore, MemBar, 1) 647 648 DEFINE_CLASS_ID(Mach, Node, 1) 649 DEFINE_CLASS_ID(MachReturn, Mach, 0) 650 DEFINE_CLASS_ID(MachSafePoint, MachReturn, 0) 651 DEFINE_CLASS_ID(MachCall, MachSafePoint, 0) 652 DEFINE_CLASS_ID(MachCallJava, MachCall, 0) 653 DEFINE_CLASS_ID(MachCallStaticJava, MachCallJava, 0) 654 DEFINE_CLASS_ID(MachCallDynamicJava, MachCallJava, 1) 655 DEFINE_CLASS_ID(MachCallRuntime, MachCall, 1) 656 DEFINE_CLASS_ID(MachCallLeaf, MachCallRuntime, 0) 657 DEFINE_CLASS_ID(MachBranch, Mach, 1) 658 DEFINE_CLASS_ID(MachIf, MachBranch, 0) 659 DEFINE_CLASS_ID(MachGoto, MachBranch, 1) 660 DEFINE_CLASS_ID(MachNullCheck, MachBranch, 2) 661 DEFINE_CLASS_ID(MachSpillCopy, Mach, 2) 662 DEFINE_CLASS_ID(MachTemp, Mach, 3) 663 DEFINE_CLASS_ID(MachConstantBase, Mach, 4) 664 DEFINE_CLASS_ID(MachConstant, Mach, 5) 665 DEFINE_CLASS_ID(MachJump, MachConstant, 0) 666 DEFINE_CLASS_ID(MachMerge, Mach, 6) 667 DEFINE_CLASS_ID(MachMemBar, Mach, 7) 668 669 DEFINE_CLASS_ID(Type, Node, 2) 670 DEFINE_CLASS_ID(Phi, Type, 0) 671 DEFINE_CLASS_ID(ConstraintCast, Type, 1) 672 DEFINE_CLASS_ID(CastII, ConstraintCast, 0) 673 DEFINE_CLASS_ID(CheckCastPP, ConstraintCast, 1) 674 DEFINE_CLASS_ID(CMove, Type, 3) 675 DEFINE_CLASS_ID(SafePointScalarObject, Type, 4) 676 DEFINE_CLASS_ID(DecodeNarrowPtr, Type, 5) 677 DEFINE_CLASS_ID(DecodeN, DecodeNarrowPtr, 0) 678 DEFINE_CLASS_ID(DecodeNKlass, DecodeNarrowPtr, 1) 679 DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6) 680 DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0) 681 DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1) 682 683 DEFINE_CLASS_ID(Proj, Node, 3) 684 DEFINE_CLASS_ID(CatchProj, Proj, 0) 685 DEFINE_CLASS_ID(JumpProj, Proj, 1) 686 DEFINE_CLASS_ID(IfProj, Proj, 2) 687 DEFINE_CLASS_ID(IfTrue, IfProj, 0) 688 DEFINE_CLASS_ID(IfFalse, IfProj, 1) 689 DEFINE_CLASS_ID(Parm, Proj, 4) 690 DEFINE_CLASS_ID(MachProj, Proj, 5) 691 692 DEFINE_CLASS_ID(Mem, Node, 4) 693 DEFINE_CLASS_ID(Load, Mem, 0) 694 DEFINE_CLASS_ID(LoadVector, Load, 0) 695 DEFINE_CLASS_ID(LoadVectorGather, LoadVector, 0) 696 DEFINE_CLASS_ID(Store, Mem, 1) 697 DEFINE_CLASS_ID(StoreVector, Store, 0) 698 DEFINE_CLASS_ID(StoreVectorScatter, StoreVector, 0) 699 DEFINE_CLASS_ID(LoadStore, Mem, 2) 700 DEFINE_CLASS_ID(LoadStoreConditional, LoadStore, 0) 701 DEFINE_CLASS_ID(CompareAndSwap, LoadStoreConditional, 0) 702 DEFINE_CLASS_ID(CompareAndExchangeNode, LoadStore, 1) 703 704 DEFINE_CLASS_ID(Region, Node, 5) 705 DEFINE_CLASS_ID(Loop, Region, 0) 706 DEFINE_CLASS_ID(Root, Loop, 0) 707 DEFINE_CLASS_ID(CountedLoop, Loop, 1) 708 DEFINE_CLASS_ID(OuterStripMinedLoop, Loop, 2) 709 710 DEFINE_CLASS_ID(Sub, Node, 6) 711 DEFINE_CLASS_ID(Cmp, Sub, 0) 712 DEFINE_CLASS_ID(FastLock, Cmp, 0) 713 DEFINE_CLASS_ID(FastUnlock, Cmp, 1) 714 DEFINE_CLASS_ID(SubTypeCheck,Cmp, 2) 715 716 DEFINE_CLASS_ID(MergeMem, Node, 7) 717 DEFINE_CLASS_ID(Bool, Node, 8) 718 DEFINE_CLASS_ID(AddP, Node, 9) 719 DEFINE_CLASS_ID(BoxLock, Node, 10) 720 DEFINE_CLASS_ID(Add, Node, 11) 721 DEFINE_CLASS_ID(Mul, Node, 12) 722 DEFINE_CLASS_ID(Vector, Node, 13) 723 DEFINE_CLASS_ID(VectorMaskCmp, Vector, 0) 724 DEFINE_CLASS_ID(ClearArray, Node, 14) 725 DEFINE_CLASS_ID(Halt, Node, 15) 726 DEFINE_CLASS_ID(Opaque1, Node, 16) 727 728 _max_classes = ClassMask_Halt 729 }; 730 #undef DEFINE_CLASS_ID 731 732 // Flags are sorted by usage frequency. 733 enum NodeFlags { 734 Flag_is_Copy = 0x01, // should be first bit to avoid shift 735 Flag_rematerialize = Flag_is_Copy << 1, 736 Flag_needs_anti_dependence_check = Flag_rematerialize << 1, 737 Flag_is_macro = Flag_needs_anti_dependence_check << 1, 738 Flag_is_Con = Flag_is_macro << 1, 739 Flag_is_cisc_alternate = Flag_is_Con << 1, 740 Flag_is_dead_loop_safe = Flag_is_cisc_alternate << 1, 741 Flag_may_be_short_branch = Flag_is_dead_loop_safe << 1, 742 Flag_avoid_back_to_back_before = Flag_may_be_short_branch << 1, 743 Flag_avoid_back_to_back_after = Flag_avoid_back_to_back_before << 1, 744 Flag_has_call = Flag_avoid_back_to_back_after << 1, 745 Flag_is_reduction = Flag_has_call << 1, 746 Flag_is_scheduled = Flag_is_reduction << 1, 747 Flag_has_vector_mask_set = Flag_is_scheduled << 1, 748 Flag_is_expensive = Flag_has_vector_mask_set << 1, 749 _last_flag = Flag_is_expensive 750 }; 751 752 class PD; 753 754 private: 755 juint _class_id; 756 jushort _flags; 757 758 static juint max_flags(); 759 760 protected: 761 // These methods should be called from constructors only. 762 void init_class_id(juint c) { 763 _class_id = c; // cast out const 764 } 765 void init_flags(uint fl) { 766 assert(fl <= max_flags(), "invalid node flag"); 767 _flags |= fl; 768 } 769 void clear_flag(uint fl) { 770 assert(fl <= max_flags(), "invalid node flag"); 771 _flags &= ~fl; 772 } 773 774 public: 775 const juint class_id() const { return _class_id; } 776 777 const jushort flags() const { return _flags; } 778 779 void add_flag(jushort fl) { init_flags(fl); } 780 781 void remove_flag(jushort fl) { clear_flag(fl); } 782 783 // Return a dense integer opcode number 784 virtual int Opcode() const; 785 786 // Virtual inherited Node size 787 virtual uint size_of() const; 788 789 // Other interesting Node properties 790 #define DEFINE_CLASS_QUERY(type) \ 791 bool is_##type() const { \ 792 return ((_class_id & ClassMask_##type) == Class_##type); \ 793 } \ 794 type##Node *as_##type() const { \ 795 assert(is_##type(), "invalid node class"); \ 796 return (type##Node*)this; \ 797 } \ 798 type##Node* isa_##type() const { \ 799 return (is_##type()) ? as_##type() : NULL; \ 800 } 801 802 DEFINE_CLASS_QUERY(AbstractLock) 803 DEFINE_CLASS_QUERY(Add) 804 DEFINE_CLASS_QUERY(AddP) 805 DEFINE_CLASS_QUERY(Allocate) 806 DEFINE_CLASS_QUERY(AllocateArray) 807 DEFINE_CLASS_QUERY(ArrayCopy) 808 DEFINE_CLASS_QUERY(Bool) 809 DEFINE_CLASS_QUERY(BoxLock) 810 DEFINE_CLASS_QUERY(Call) 811 DEFINE_CLASS_QUERY(CallDynamicJava) 812 DEFINE_CLASS_QUERY(CallJava) 813 DEFINE_CLASS_QUERY(CallLeaf) 814 DEFINE_CLASS_QUERY(CallRuntime) 815 DEFINE_CLASS_QUERY(CallStaticJava) 816 DEFINE_CLASS_QUERY(Catch) 817 DEFINE_CLASS_QUERY(CatchProj) 818 DEFINE_CLASS_QUERY(CheckCastPP) 819 DEFINE_CLASS_QUERY(CastII) 820 DEFINE_CLASS_QUERY(ConstraintCast) 821 DEFINE_CLASS_QUERY(ClearArray) 822 DEFINE_CLASS_QUERY(CMove) 823 DEFINE_CLASS_QUERY(Cmp) 824 DEFINE_CLASS_QUERY(CountedLoop) 825 DEFINE_CLASS_QUERY(CountedLoopEnd) 826 DEFINE_CLASS_QUERY(DecodeNarrowPtr) 827 DEFINE_CLASS_QUERY(DecodeN) 828 DEFINE_CLASS_QUERY(DecodeNKlass) 829 DEFINE_CLASS_QUERY(EncodeNarrowPtr) 830 DEFINE_CLASS_QUERY(EncodeP) 831 DEFINE_CLASS_QUERY(EncodePKlass) 832 DEFINE_CLASS_QUERY(FastLock) 833 DEFINE_CLASS_QUERY(FastUnlock) 834 DEFINE_CLASS_QUERY(Halt) 835 DEFINE_CLASS_QUERY(If) 836 DEFINE_CLASS_QUERY(RangeCheck) 837 DEFINE_CLASS_QUERY(IfProj) 838 DEFINE_CLASS_QUERY(IfFalse) 839 DEFINE_CLASS_QUERY(IfTrue) 840 DEFINE_CLASS_QUERY(Initialize) 841 DEFINE_CLASS_QUERY(Jump) 842 DEFINE_CLASS_QUERY(JumpProj) 843 DEFINE_CLASS_QUERY(Load) 844 DEFINE_CLASS_QUERY(LoadStore) 845 DEFINE_CLASS_QUERY(LoadStoreConditional) 846 DEFINE_CLASS_QUERY(Lock) 847 DEFINE_CLASS_QUERY(Loop) 848 DEFINE_CLASS_QUERY(Mach) 849 DEFINE_CLASS_QUERY(MachBranch) 850 DEFINE_CLASS_QUERY(MachCall) 851 DEFINE_CLASS_QUERY(MachCallDynamicJava) 852 DEFINE_CLASS_QUERY(MachCallJava) 853 DEFINE_CLASS_QUERY(MachCallLeaf) 854 DEFINE_CLASS_QUERY(MachCallRuntime) 855 DEFINE_CLASS_QUERY(MachCallStaticJava) 856 DEFINE_CLASS_QUERY(MachConstantBase) 857 DEFINE_CLASS_QUERY(MachConstant) 858 DEFINE_CLASS_QUERY(MachGoto) 859 DEFINE_CLASS_QUERY(MachIf) 860 DEFINE_CLASS_QUERY(MachJump) 861 DEFINE_CLASS_QUERY(MachNullCheck) 862 DEFINE_CLASS_QUERY(MachProj) 863 DEFINE_CLASS_QUERY(MachReturn) 864 DEFINE_CLASS_QUERY(MachSafePoint) 865 DEFINE_CLASS_QUERY(MachSpillCopy) 866 DEFINE_CLASS_QUERY(MachTemp) 867 DEFINE_CLASS_QUERY(MachMemBar) 868 DEFINE_CLASS_QUERY(MachMerge) 869 DEFINE_CLASS_QUERY(Mem) 870 DEFINE_CLASS_QUERY(MemBar) 871 DEFINE_CLASS_QUERY(MemBarStoreStore) 872 DEFINE_CLASS_QUERY(MergeMem) 873 DEFINE_CLASS_QUERY(Mul) 874 DEFINE_CLASS_QUERY(Multi) 875 DEFINE_CLASS_QUERY(MultiBranch) 876 DEFINE_CLASS_QUERY(Opaque1) 877 DEFINE_CLASS_QUERY(OuterStripMinedLoop) 878 DEFINE_CLASS_QUERY(OuterStripMinedLoopEnd) 879 DEFINE_CLASS_QUERY(Parm) 880 DEFINE_CLASS_QUERY(PCTable) 881 DEFINE_CLASS_QUERY(Phi) 882 DEFINE_CLASS_QUERY(Proj) 883 DEFINE_CLASS_QUERY(Region) 884 DEFINE_CLASS_QUERY(Root) 885 DEFINE_CLASS_QUERY(SafePoint) 886 DEFINE_CLASS_QUERY(SafePointScalarObject) 887 DEFINE_CLASS_QUERY(Start) 888 DEFINE_CLASS_QUERY(Store) 889 DEFINE_CLASS_QUERY(Sub) 890 DEFINE_CLASS_QUERY(SubTypeCheck) 891 DEFINE_CLASS_QUERY(Type) 892 DEFINE_CLASS_QUERY(Vector) 893 DEFINE_CLASS_QUERY(LoadVector) 894 DEFINE_CLASS_QUERY(LoadVectorGather) 895 DEFINE_CLASS_QUERY(StoreVector) 896 DEFINE_CLASS_QUERY(StoreVectorScatter) 897 DEFINE_CLASS_QUERY(VectorMaskCmp) 898 DEFINE_CLASS_QUERY(Unlock) 899 900 #undef DEFINE_CLASS_QUERY 901 902 // duplicate of is_MachSpillCopy() 903 bool is_SpillCopy () const { 904 return ((_class_id & ClassMask_MachSpillCopy) == Class_MachSpillCopy); 905 } 906 907 bool is_Con () const { return (_flags & Flag_is_Con) != 0; } 908 // The data node which is safe to leave in dead loop during IGVN optimization. 909 bool is_dead_loop_safe() const { 910 return is_Phi() || (is_Proj() && in(0) == NULL) || 911 ((_flags & (Flag_is_dead_loop_safe | Flag_is_Con)) != 0 && 912 (!is_Proj() || !in(0)->is_Allocate())); 913 } 914 915 // is_Copy() returns copied edge index (0 or 1) 916 uint is_Copy() const { return (_flags & Flag_is_Copy); } 917 918 virtual bool is_CFG() const { return false; } 919 920 // If this node is control-dependent on a test, can it be 921 // rerouted to a dominating equivalent test? This is usually 922 // true of non-CFG nodes, but can be false for operations which 923 // depend for their correct sequencing on more than one test. 924 // (In that case, hoisting to a dominating test may silently 925 // skip some other important test.) 926 virtual bool depends_only_on_test() const { assert(!is_CFG(), ""); return true; }; 927 928 // When building basic blocks, I need to have a notion of block beginning 929 // Nodes, next block selector Nodes (block enders), and next block 930 // projections. These calls need to work on their machine equivalents. The 931 // Ideal beginning Nodes are RootNode, RegionNode and StartNode. 932 bool is_block_start() const { 933 if ( is_Region() ) 934 return this == (const Node*)in(0); 935 else 936 return is_Start(); 937 } 938 939 // The Ideal control projection Nodes are IfTrue/IfFalse, JumpProjNode, Root, 940 // Goto and Return. This call also returns the block ending Node. 941 virtual const Node *is_block_proj() const; 942 943 // The node is a "macro" node which needs to be expanded before matching 944 bool is_macro() const { return (_flags & Flag_is_macro) != 0; } 945 // The node is expensive: the best control is set during loop opts 946 bool is_expensive() const { return (_flags & Flag_is_expensive) != 0 && in(0) != NULL; } 947 948 // An arithmetic node which accumulates a data in a loop. 949 // It must have the loop's phi as input and provide a def to the phi. 950 bool is_reduction() const { return (_flags & Flag_is_reduction) != 0; } 951 952 // The node is a CountedLoopEnd with a mask annotation so as to emit a restore context 953 bool has_vector_mask_set() const { return (_flags & Flag_has_vector_mask_set) != 0; } 954 955 // Used in lcm to mark nodes that have scheduled 956 bool is_scheduled() const { return (_flags & Flag_is_scheduled) != 0; } 957 958 //----------------- Optimization 959 960 // Get the worst-case Type output for this Node. 961 virtual const class Type *bottom_type() const; 962 963 // If we find a better type for a node, try to record it permanently. 964 // Return true if this node actually changed. 965 // Be sure to do the hash_delete game in the "rehash" variant. 966 void raise_bottom_type(const Type* new_type); 967 968 // Get the address type with which this node uses and/or defs memory, 969 // or NULL if none. The address type is conservatively wide. 970 // Returns non-null for calls, membars, loads, stores, etc. 971 // Returns TypePtr::BOTTOM if the node touches memory "broadly". 972 virtual const class TypePtr *adr_type() const { return NULL; } 973 974 // Return an existing node which computes the same function as this node. 975 // The optimistic combined algorithm requires this to return a Node which 976 // is a small number of steps away (e.g., one of my inputs). 977 virtual Node* Identity(PhaseGVN* phase); 978 979 // Return the set of values this Node can take on at runtime. 980 virtual const Type* Value(PhaseGVN* phase) const; 981 982 // Return a node which is more "ideal" than the current node. 983 // The invariants on this call are subtle. If in doubt, read the 984 // treatise in node.cpp above the default implemention AND TEST WITH 985 // +VerifyIterativeGVN! 986 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 987 988 // Some nodes have specific Ideal subgraph transformations only if they are 989 // unique users of specific nodes. Such nodes should be put on IGVN worklist 990 // for the transformations to happen. 991 bool has_special_unique_user() const; 992 993 // Skip Proj and CatchProj nodes chains. Check for Null and Top. 994 Node* find_exact_control(Node* ctrl); 995 996 // Check if 'this' node dominates or equal to 'sub'. 997 bool dominates(Node* sub, Node_List &nlist); 998 999 protected: 1000 bool remove_dead_region(PhaseGVN *phase, bool can_reshape); 1001 public: 1002 1003 // See if there is valid pipeline info 1004 static const Pipeline *pipeline_class(); 1005 virtual const Pipeline *pipeline() const; 1006 1007 // Compute the latency from the def to this instruction of the ith input node 1008 uint latency(uint i); 1009 1010 // Hash & compare functions, for pessimistic value numbering 1011 1012 // If the hash function returns the special sentinel value NO_HASH, 1013 // the node is guaranteed never to compare equal to any other node. 1014 // If we accidentally generate a hash with value NO_HASH the node 1015 // won't go into the table and we'll lose a little optimization. 1016 static const uint NO_HASH = 0; 1017 virtual uint hash() const; 1018 virtual bool cmp( const Node &n ) const; 1019 1020 // Operation appears to be iteratively computed (such as an induction variable) 1021 // It is possible for this operation to return false for a loop-varying 1022 // value, if it appears (by local graph inspection) to be computed by a simple conditional. 1023 bool is_iteratively_computed(); 1024 1025 // Determine if a node is a counted loop induction variable. 1026 // NOTE: The method is defined in "loopnode.cpp". 1027 bool is_cloop_ind_var() const; 1028 1029 // Return a node with opcode "opc" and same inputs as "this" if one can 1030 // be found; Otherwise return NULL; 1031 Node* find_similar(int opc); 1032 1033 // Return the unique control out if only one. Null if none or more than one. 1034 Node* unique_ctrl_out() const; 1035 1036 // Set control or add control as precedence edge 1037 void ensure_control_or_add_prec(Node* c); 1038 1039 //----------------- Code Generation 1040 1041 // Ideal register class for Matching. Zero means unmatched instruction 1042 // (these are cloned instead of converted to machine nodes). 1043 virtual uint ideal_reg() const; 1044 1045 static const uint NotAMachineReg; // must be > max. machine register 1046 1047 // Do we Match on this edge index or not? Generally false for Control 1048 // and true for everything else. Weird for calls & returns. 1049 virtual uint match_edge(uint idx) const; 1050 1051 // Register class output is returned in 1052 virtual const RegMask &out_RegMask() const; 1053 // Register class input is expected in 1054 virtual const RegMask &in_RegMask(uint) const; 1055 // Should we clone rather than spill this instruction? 1056 bool rematerialize() const; 1057 1058 // Return JVM State Object if this Node carries debug info, or NULL otherwise 1059 virtual JVMState* jvms() const; 1060 1061 // Print as assembly 1062 virtual void format( PhaseRegAlloc *, outputStream* st = tty ) const; 1063 // Emit bytes starting at parameter 'ptr' 1064 // Bump 'ptr' by the number of output bytes 1065 virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const; 1066 // Size of instruction in bytes 1067 virtual uint size(PhaseRegAlloc *ra_) const; 1068 1069 // Convenience function to extract an integer constant from a node. 1070 // If it is not an integer constant (either Con, CastII, or Mach), 1071 // return value_if_unknown. 1072 jint find_int_con(jint value_if_unknown) const { 1073 const TypeInt* t = find_int_type(); 1074 return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown; 1075 } 1076 // Return the constant, knowing it is an integer constant already 1077 jint get_int() const { 1078 const TypeInt* t = find_int_type(); 1079 guarantee(t != NULL, "must be con"); 1080 return t->get_con(); 1081 } 1082 // Here's where the work is done. Can produce non-constant int types too. 1083 const TypeInt* find_int_type() const; 1084 1085 // Same thing for long (and intptr_t, via type.hpp): 1086 jlong get_long() const { 1087 const TypeLong* t = find_long_type(); 1088 guarantee(t != NULL, "must be con"); 1089 return t->get_con(); 1090 } 1091 jlong find_long_con(jint value_if_unknown) const { 1092 const TypeLong* t = find_long_type(); 1093 return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown; 1094 } 1095 const TypeLong* find_long_type() const; 1096 1097 const TypePtr* get_ptr_type() const; 1098 1099 // These guys are called by code generated by ADLC: 1100 intptr_t get_ptr() const; 1101 intptr_t get_narrowcon() const; 1102 jdouble getd() const; 1103 jfloat getf() const; 1104 1105 // Nodes which are pinned into basic blocks 1106 virtual bool pinned() const { return false; } 1107 1108 // Nodes which use memory without consuming it, hence need antidependences 1109 // More specifically, needs_anti_dependence_check returns true iff the node 1110 // (a) does a load, and (b) does not perform a store (except perhaps to a 1111 // stack slot or some other unaliased location). 1112 bool needs_anti_dependence_check() const; 1113 1114 // Return which operand this instruction may cisc-spill. In other words, 1115 // return operand position that can convert from reg to memory access 1116 virtual int cisc_operand() const { return AdlcVMDeps::Not_cisc_spillable; } 1117 bool is_cisc_alternate() const { return (_flags & Flag_is_cisc_alternate) != 0; } 1118 1119 //----------------- Graph walking 1120 public: 1121 // Walk and apply member functions recursively. 1122 // Supplied (this) pointer is root. 1123 void walk(NFunc pre, NFunc post, void *env); 1124 static void nop(Node &, void*); // Dummy empty function 1125 static void packregion( Node &n, void* ); 1126 private: 1127 void walk_(NFunc pre, NFunc post, void *env, VectorSet &visited); 1128 1129 //----------------- Printing, etc 1130 #ifndef PRODUCT 1131 static bool add_to_worklist(Node* n, Node_List* worklist, Arena* old_arena, VectorSet* old_space, VectorSet* new_space); 1132 public: 1133 Node* find(int idx, bool only_ctrl = false); // Search the graph for the given idx. 1134 Node* find_ctrl(int idx); // Search control ancestors for the given idx. 1135 void dump() const { dump("\n"); } // Print this node. 1136 void dump(const char* suffix, bool mark = false, outputStream *st = tty) const; // Print this node. 1137 void dump(int depth) const; // Print this node, recursively to depth d 1138 void dump_ctrl(int depth) const; // Print control nodes, to depth d 1139 void dump_comp() const; // Print this node in compact representation. 1140 // Print this node in compact representation. 1141 void dump_comp(const char* suffix, outputStream *st = tty) const; 1142 virtual void dump_req(outputStream *st = tty) const; // Print required-edge info 1143 virtual void dump_prec(outputStream *st = tty) const; // Print precedence-edge info 1144 virtual void dump_out(outputStream *st = tty) const; // Print the output edge info 1145 virtual void dump_spec(outputStream *st) const {}; // Print per-node info 1146 // Print compact per-node info 1147 virtual void dump_compact_spec(outputStream *st) const { dump_spec(st); } 1148 void dump_related() const; // Print related nodes (depends on node at hand). 1149 // Print related nodes up to given depths for input and output nodes. 1150 void dump_related(uint d_in, uint d_out) const; 1151 void dump_related_compact() const; // Print related nodes in compact representation. 1152 // Collect related nodes. 1153 virtual void related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const; 1154 // Collect nodes starting from this node, explicitly including/excluding control and data links. 1155 void collect_nodes(GrowableArray<Node*> *ns, int d, bool ctrl, bool data) const; 1156 1157 // Node collectors, to be used in implementations of Node::rel(). 1158 // Collect the entire data input graph. Include control inputs if requested. 1159 void collect_nodes_in_all_data(GrowableArray<Node*> *ns, bool ctrl) const; 1160 // Collect the entire control input graph. Include data inputs if requested. 1161 void collect_nodes_in_all_ctrl(GrowableArray<Node*> *ns, bool data) const; 1162 // Collect the entire output graph until hitting and including control nodes. 1163 void collect_nodes_out_all_ctrl_boundary(GrowableArray<Node*> *ns) const; 1164 1165 void verify_edges(Unique_Node_List &visited); // Verify bi-directional edges 1166 static void verify(Node* n, int verify_depth); 1167 1168 // This call defines a class-unique string used to identify class instances 1169 virtual const char *Name() const; 1170 1171 void dump_format(PhaseRegAlloc *ra) const; // debug access to MachNode::format(...) 1172 // RegMask Print Functions 1173 void dump_in_regmask(int idx) { in_RegMask(idx).dump(); } 1174 void dump_out_regmask() { out_RegMask().dump(); } 1175 static bool in_dump() { return Compile::current()->_in_dump_cnt > 0; } 1176 void fast_dump() const { 1177 tty->print("%4d: %-17s", _idx, Name()); 1178 for (uint i = 0; i < len(); i++) 1179 if (in(i)) 1180 tty->print(" %4d", in(i)->_idx); 1181 else 1182 tty->print(" NULL"); 1183 tty->print("\n"); 1184 } 1185 #endif 1186 #ifdef ASSERT 1187 void verify_construction(); 1188 bool verify_jvms(const JVMState* jvms) const; 1189 int _debug_idx; // Unique value assigned to every node. 1190 int debug_idx() const { return _debug_idx; } 1191 void set_debug_idx( int debug_idx ) { _debug_idx = debug_idx; } 1192 1193 Node* _debug_orig; // Original version of this, if any. 1194 Node* debug_orig() const { return _debug_orig; } 1195 void set_debug_orig(Node* orig); // _debug_orig = orig 1196 void dump_orig(outputStream *st, bool print_key = true) const; 1197 1198 int _hash_lock; // Barrier to modifications of nodes in the hash table 1199 void enter_hash_lock() { ++_hash_lock; assert(_hash_lock < 99, "in too many hash tables?"); } 1200 void exit_hash_lock() { --_hash_lock; assert(_hash_lock >= 0, "mispaired hash locks"); } 1201 1202 static void init_NodeProperty(); 1203 1204 #if OPTO_DU_ITERATOR_ASSERT 1205 const Node* _last_del; // The last deleted node. 1206 uint _del_tick; // Bumped when a deletion happens.. 1207 #endif 1208 #endif 1209 }; 1210 1211 1212 #ifndef PRODUCT 1213 1214 // Used in debugging code to avoid walking across dead or uninitialized edges. 1215 inline bool NotANode(const Node* n) { 1216 if (n == NULL) return true; 1217 if (((intptr_t)n & 1) != 0) return true; // uninitialized, etc. 1218 if (*(address*)n == badAddress) return true; // kill by Node::destruct 1219 return false; 1220 } 1221 1222 #endif 1223 1224 1225 //----------------------------------------------------------------------------- 1226 // Iterators over DU info, and associated Node functions. 1227 1228 #if OPTO_DU_ITERATOR_ASSERT 1229 1230 // Common code for assertion checking on DU iterators. 1231 class DUIterator_Common { 1232 #ifdef ASSERT 1233 protected: 1234 bool _vdui; // cached value of VerifyDUIterators 1235 const Node* _node; // the node containing the _out array 1236 uint _outcnt; // cached node->_outcnt 1237 uint _del_tick; // cached node->_del_tick 1238 Node* _last; // last value produced by the iterator 1239 1240 void sample(const Node* node); // used by c'tor to set up for verifies 1241 void verify(const Node* node, bool at_end_ok = false); 1242 void verify_resync(); 1243 void reset(const DUIterator_Common& that); 1244 1245 // The VDUI_ONLY macro protects code conditionalized on VerifyDUIterators 1246 #define I_VDUI_ONLY(i,x) { if ((i)._vdui) { x; } } 1247 #else 1248 #define I_VDUI_ONLY(i,x) { } 1249 #endif //ASSERT 1250 }; 1251 1252 #define VDUI_ONLY(x) I_VDUI_ONLY(*this, x) 1253 1254 // Default DU iterator. Allows appends onto the out array. 1255 // Allows deletion from the out array only at the current point. 1256 // Usage: 1257 // for (DUIterator i = x->outs(); x->has_out(i); i++) { 1258 // Node* y = x->out(i); 1259 // ... 1260 // } 1261 // Compiles in product mode to a unsigned integer index, which indexes 1262 // onto a repeatedly reloaded base pointer of x->_out. The loop predicate 1263 // also reloads x->_outcnt. If you delete, you must perform "--i" just 1264 // before continuing the loop. You must delete only the last-produced 1265 // edge. You must delete only a single copy of the last-produced edge, 1266 // or else you must delete all copies at once (the first time the edge 1267 // is produced by the iterator). 1268 class DUIterator : public DUIterator_Common { 1269 friend class Node; 1270 1271 // This is the index which provides the product-mode behavior. 1272 // Whatever the product-mode version of the system does to the 1273 // DUI index is done to this index. All other fields in 1274 // this class are used only for assertion checking. 1275 uint _idx; 1276 1277 #ifdef ASSERT 1278 uint _refresh_tick; // Records the refresh activity. 1279 1280 void sample(const Node* node); // Initialize _refresh_tick etc. 1281 void verify(const Node* node, bool at_end_ok = false); 1282 void verify_increment(); // Verify an increment operation. 1283 void verify_resync(); // Verify that we can back up over a deletion. 1284 void verify_finish(); // Verify that the loop terminated properly. 1285 void refresh(); // Resample verification info. 1286 void reset(const DUIterator& that); // Resample after assignment. 1287 #endif 1288 1289 DUIterator(const Node* node, int dummy_to_avoid_conversion) 1290 { _idx = 0; debug_only(sample(node)); } 1291 1292 public: 1293 // initialize to garbage; clear _vdui to disable asserts 1294 DUIterator() 1295 { /*initialize to garbage*/ debug_only(_vdui = false); } 1296 1297 void operator++(int dummy_to_specify_postfix_op) 1298 { _idx++; VDUI_ONLY(verify_increment()); } 1299 1300 void operator--() 1301 { VDUI_ONLY(verify_resync()); --_idx; } 1302 1303 ~DUIterator() 1304 { VDUI_ONLY(verify_finish()); } 1305 1306 void operator=(const DUIterator& that) 1307 { _idx = that._idx; debug_only(reset(that)); } 1308 }; 1309 1310 DUIterator Node::outs() const 1311 { return DUIterator(this, 0); } 1312 DUIterator& Node::refresh_out_pos(DUIterator& i) const 1313 { I_VDUI_ONLY(i, i.refresh()); return i; } 1314 bool Node::has_out(DUIterator& i) const 1315 { I_VDUI_ONLY(i, i.verify(this,true));return i._idx < _outcnt; } 1316 Node* Node::out(DUIterator& i) const 1317 { I_VDUI_ONLY(i, i.verify(this)); return debug_only(i._last=) _out[i._idx]; } 1318 1319 1320 // Faster DU iterator. Disallows insertions into the out array. 1321 // Allows deletion from the out array only at the current point. 1322 // Usage: 1323 // for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) { 1324 // Node* y = x->fast_out(i); 1325 // ... 1326 // } 1327 // Compiles in product mode to raw Node** pointer arithmetic, with 1328 // no reloading of pointers from the original node x. If you delete, 1329 // you must perform "--i; --imax" just before continuing the loop. 1330 // If you delete multiple copies of the same edge, you must decrement 1331 // imax, but not i, multiple times: "--i, imax -= num_edges". 1332 class DUIterator_Fast : public DUIterator_Common { 1333 friend class Node; 1334 friend class DUIterator_Last; 1335 1336 // This is the pointer which provides the product-mode behavior. 1337 // Whatever the product-mode version of the system does to the 1338 // DUI pointer is done to this pointer. All other fields in 1339 // this class are used only for assertion checking. 1340 Node** _outp; 1341 1342 #ifdef ASSERT 1343 void verify(const Node* node, bool at_end_ok = false); 1344 void verify_limit(); 1345 void verify_resync(); 1346 void verify_relimit(uint n); 1347 void reset(const DUIterator_Fast& that); 1348 #endif 1349 1350 // Note: offset must be signed, since -1 is sometimes passed 1351 DUIterator_Fast(const Node* node, ptrdiff_t offset) 1352 { _outp = node->_out + offset; debug_only(sample(node)); } 1353 1354 public: 1355 // initialize to garbage; clear _vdui to disable asserts 1356 DUIterator_Fast() 1357 { /*initialize to garbage*/ debug_only(_vdui = false); } 1358 1359 void operator++(int dummy_to_specify_postfix_op) 1360 { _outp++; VDUI_ONLY(verify(_node, true)); } 1361 1362 void operator--() 1363 { VDUI_ONLY(verify_resync()); --_outp; } 1364 1365 void operator-=(uint n) // applied to the limit only 1366 { _outp -= n; VDUI_ONLY(verify_relimit(n)); } 1367 1368 bool operator<(DUIterator_Fast& limit) { 1369 I_VDUI_ONLY(*this, this->verify(_node, true)); 1370 I_VDUI_ONLY(limit, limit.verify_limit()); 1371 return _outp < limit._outp; 1372 } 1373 1374 void operator=(const DUIterator_Fast& that) 1375 { _outp = that._outp; debug_only(reset(that)); } 1376 }; 1377 1378 DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const { 1379 // Assign a limit pointer to the reference argument: 1380 imax = DUIterator_Fast(this, (ptrdiff_t)_outcnt); 1381 // Return the base pointer: 1382 return DUIterator_Fast(this, 0); 1383 } 1384 Node* Node::fast_out(DUIterator_Fast& i) const { 1385 I_VDUI_ONLY(i, i.verify(this)); 1386 return debug_only(i._last=) *i._outp; 1387 } 1388 1389 1390 // Faster DU iterator. Requires each successive edge to be removed. 1391 // Does not allow insertion of any edges. 1392 // Usage: 1393 // for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= num_edges) { 1394 // Node* y = x->last_out(i); 1395 // ... 1396 // } 1397 // Compiles in product mode to raw Node** pointer arithmetic, with 1398 // no reloading of pointers from the original node x. 1399 class DUIterator_Last : private DUIterator_Fast { 1400 friend class Node; 1401 1402 #ifdef ASSERT 1403 void verify(const Node* node, bool at_end_ok = false); 1404 void verify_limit(); 1405 void verify_step(uint num_edges); 1406 #endif 1407 1408 // Note: offset must be signed, since -1 is sometimes passed 1409 DUIterator_Last(const Node* node, ptrdiff_t offset) 1410 : DUIterator_Fast(node, offset) { } 1411 1412 void operator++(int dummy_to_specify_postfix_op) {} // do not use 1413 void operator<(int) {} // do not use 1414 1415 public: 1416 DUIterator_Last() { } 1417 // initialize to garbage 1418 1419 void operator--() 1420 { _outp--; VDUI_ONLY(verify_step(1)); } 1421 1422 void operator-=(uint n) 1423 { _outp -= n; VDUI_ONLY(verify_step(n)); } 1424 1425 bool operator>=(DUIterator_Last& limit) { 1426 I_VDUI_ONLY(*this, this->verify(_node, true)); 1427 I_VDUI_ONLY(limit, limit.verify_limit()); 1428 return _outp >= limit._outp; 1429 } 1430 1431 void operator=(const DUIterator_Last& that) 1432 { DUIterator_Fast::operator=(that); } 1433 }; 1434 1435 DUIterator_Last Node::last_outs(DUIterator_Last& imin) const { 1436 // Assign a limit pointer to the reference argument: 1437 imin = DUIterator_Last(this, 0); 1438 // Return the initial pointer: 1439 return DUIterator_Last(this, (ptrdiff_t)_outcnt - 1); 1440 } 1441 Node* Node::last_out(DUIterator_Last& i) const { 1442 I_VDUI_ONLY(i, i.verify(this)); 1443 return debug_only(i._last=) *i._outp; 1444 } 1445 1446 #endif //OPTO_DU_ITERATOR_ASSERT 1447 1448 #undef I_VDUI_ONLY 1449 #undef VDUI_ONLY 1450 1451 // An Iterator that truly follows the iterator pattern. Doesn't 1452 // support deletion but could be made to. 1453 // 1454 // for (SimpleDUIterator i(n); i.has_next(); i.next()) { 1455 // Node* m = i.get(); 1456 // 1457 class SimpleDUIterator : public StackObj { 1458 private: 1459 Node* node; 1460 DUIterator_Fast i; 1461 DUIterator_Fast imax; 1462 public: 1463 SimpleDUIterator(Node* n): node(n), i(n->fast_outs(imax)) {} 1464 bool has_next() { return i < imax; } 1465 void next() { i++; } 1466 Node* get() { return node->fast_out(i); } 1467 }; 1468 1469 1470 //----------------------------------------------------------------------------- 1471 // Map dense integer indices to Nodes. Uses classic doubling-array trick. 1472 // Abstractly provides an infinite array of Node*'s, initialized to NULL. 1473 // Note that the constructor just zeros things, and since I use Arena 1474 // allocation I do not need a destructor to reclaim storage. 1475 class Node_Array : public ResourceObj { 1476 friend class VMStructs; 1477 protected: 1478 Arena *_a; // Arena to allocate in 1479 uint _max; 1480 Node **_nodes; 1481 void grow( uint i ); // Grow array node to fit 1482 public: 1483 Node_Array(Arena *a) : _a(a), _max(OptoNodeListSize) { 1484 _nodes = NEW_ARENA_ARRAY( a, Node *, OptoNodeListSize ); 1485 for( int i = 0; i < OptoNodeListSize; i++ ) { 1486 _nodes[i] = NULL; 1487 } 1488 } 1489 1490 Node_Array(Node_Array *na) : _a(na->_a), _max(na->_max), _nodes(na->_nodes) {} 1491 Node *operator[] ( uint i ) const // Lookup, or NULL for not mapped 1492 { return (i<_max) ? _nodes[i] : (Node*)NULL; } 1493 Node *at( uint i ) const { assert(i<_max,"oob"); return _nodes[i]; } 1494 Node **adr() { return _nodes; } 1495 // Extend the mapping: index i maps to Node *n. 1496 void map( uint i, Node *n ) { if( i>=_max ) grow(i); _nodes[i] = n; } 1497 void insert( uint i, Node *n ); 1498 void remove( uint i ); // Remove, preserving order 1499 void sort( C_sort_func_t func); 1500 void reset( Arena *new_a ); // Zap mapping to empty; reclaim storage 1501 void clear(); // Set all entries to NULL, keep storage 1502 uint Size() const { return _max; } 1503 void dump() const; 1504 }; 1505 1506 class Node_List : public Node_Array { 1507 friend class VMStructs; 1508 uint _cnt; 1509 public: 1510 Node_List() : Node_Array(Thread::current()->resource_area()), _cnt(0) {} 1511 Node_List(Arena *a) : Node_Array(a), _cnt(0) {} 1512 bool contains(const Node* n) const { 1513 for (uint e = 0; e < size(); e++) { 1514 if (at(e) == n) return true; 1515 } 1516 return false; 1517 } 1518 void insert( uint i, Node *n ) { Node_Array::insert(i,n); _cnt++; } 1519 void remove( uint i ) { Node_Array::remove(i); _cnt--; } 1520 void push( Node *b ) { map(_cnt++,b); } 1521 void yank( Node *n ); // Find and remove 1522 Node *pop() { return _nodes[--_cnt]; } 1523 Node *rpop() { Node *b = _nodes[0]; _nodes[0]=_nodes[--_cnt]; return b;} 1524 void clear() { _cnt = 0; Node_Array::clear(); } // retain storage 1525 uint size() const { return _cnt; } 1526 void dump() const; 1527 void dump_simple() const; 1528 }; 1529 1530 //------------------------------Unique_Node_List------------------------------- 1531 class Unique_Node_List : public Node_List { 1532 friend class VMStructs; 1533 VectorSet _in_worklist; 1534 uint _clock_index; // Index in list where to pop from next 1535 public: 1536 Unique_Node_List() : Node_List(), _clock_index(0) {} 1537 Unique_Node_List(Arena *a) : Node_List(a), _in_worklist(a), _clock_index(0) {} 1538 1539 void remove( Node *n ); 1540 bool member( Node *n ) { return _in_worklist.test(n->_idx) != 0; } 1541 VectorSet& member_set(){ return _in_worklist; } 1542 1543 void push(Node* b) { 1544 if( !_in_worklist.test_set(b->_idx) ) 1545 Node_List::push(b); 1546 } 1547 Node *pop() { 1548 if( _clock_index >= size() ) _clock_index = 0; 1549 Node *b = at(_clock_index); 1550 map( _clock_index, Node_List::pop()); 1551 if (size() != 0) _clock_index++; // Always start from 0 1552 _in_worklist.remove(b->_idx); 1553 return b; 1554 } 1555 Node *remove(uint i) { 1556 Node *b = Node_List::at(i); 1557 _in_worklist.remove(b->_idx); 1558 map(i,Node_List::pop()); 1559 return b; 1560 } 1561 void yank(Node *n) { 1562 _in_worklist.remove(n->_idx); 1563 Node_List::yank(n); 1564 } 1565 void clear() { 1566 _in_worklist.clear(); // Discards storage but grows automatically 1567 Node_List::clear(); 1568 _clock_index = 0; 1569 } 1570 1571 // Used after parsing to remove useless nodes before Iterative GVN 1572 void remove_useless_nodes(VectorSet& useful); 1573 1574 bool contains(const Node* n) const { 1575 fatal("use faster member() instead"); 1576 return false; 1577 } 1578 1579 #ifndef PRODUCT 1580 void print_set() const { _in_worklist.print(); } 1581 #endif 1582 }; 1583 1584 // Inline definition of Compile::record_for_igvn must be deferred to this point. 1585 inline void Compile::record_for_igvn(Node* n) { 1586 _for_igvn->push(n); 1587 } 1588 1589 //------------------------------Node_Stack------------------------------------- 1590 class Node_Stack { 1591 friend class VMStructs; 1592 protected: 1593 struct INode { 1594 Node *node; // Processed node 1595 uint indx; // Index of next node's child 1596 }; 1597 INode *_inode_top; // tos, stack grows up 1598 INode *_inode_max; // End of _inodes == _inodes + _max 1599 INode *_inodes; // Array storage for the stack 1600 Arena *_a; // Arena to allocate in 1601 void grow(); 1602 public: 1603 Node_Stack(int size) { 1604 size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize; 1605 _a = Thread::current()->resource_area(); 1606 _inodes = NEW_ARENA_ARRAY( _a, INode, max ); 1607 _inode_max = _inodes + max; 1608 _inode_top = _inodes - 1; // stack is empty 1609 } 1610 1611 Node_Stack(Arena *a, int size) : _a(a) { 1612 size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize; 1613 _inodes = NEW_ARENA_ARRAY( _a, INode, max ); 1614 _inode_max = _inodes + max; 1615 _inode_top = _inodes - 1; // stack is empty 1616 } 1617 1618 void pop() { 1619 assert(_inode_top >= _inodes, "node stack underflow"); 1620 --_inode_top; 1621 } 1622 void push(Node *n, uint i) { 1623 ++_inode_top; 1624 if (_inode_top >= _inode_max) grow(); 1625 INode *top = _inode_top; // optimization 1626 top->node = n; 1627 top->indx = i; 1628 } 1629 Node *node() const { 1630 return _inode_top->node; 1631 } 1632 Node* node_at(uint i) const { 1633 assert(_inodes + i <= _inode_top, "in range"); 1634 return _inodes[i].node; 1635 } 1636 uint index() const { 1637 return _inode_top->indx; 1638 } 1639 uint index_at(uint i) const { 1640 assert(_inodes + i <= _inode_top, "in range"); 1641 return _inodes[i].indx; 1642 } 1643 void set_node(Node *n) { 1644 _inode_top->node = n; 1645 } 1646 void set_index(uint i) { 1647 _inode_top->indx = i; 1648 } 1649 uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes, sizeof(INode)); } // Max size 1650 uint size() const { return (uint)pointer_delta((_inode_top+1), _inodes, sizeof(INode)); } // Current size 1651 bool is_nonempty() const { return (_inode_top >= _inodes); } 1652 bool is_empty() const { return (_inode_top < _inodes); } 1653 void clear() { _inode_top = _inodes - 1; } // retain storage 1654 1655 // Node_Stack is used to map nodes. 1656 Node* find(uint idx) const; 1657 }; 1658 1659 1660 //-----------------------------Node_Notes-------------------------------------- 1661 // Debugging or profiling annotations loosely and sparsely associated 1662 // with some nodes. See Compile::node_notes_at for the accessor. 1663 class Node_Notes { 1664 friend class VMStructs; 1665 JVMState* _jvms; 1666 1667 public: 1668 Node_Notes(JVMState* jvms = NULL) { 1669 _jvms = jvms; 1670 } 1671 1672 JVMState* jvms() { return _jvms; } 1673 void set_jvms(JVMState* x) { _jvms = x; } 1674 1675 // True if there is nothing here. 1676 bool is_clear() { 1677 return (_jvms == NULL); 1678 } 1679 1680 // Make there be nothing here. 1681 void clear() { 1682 _jvms = NULL; 1683 } 1684 1685 // Make a new, clean node notes. 1686 static Node_Notes* make(Compile* C) { 1687 Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1); 1688 nn->clear(); 1689 return nn; 1690 } 1691 1692 Node_Notes* clone(Compile* C) { 1693 Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1); 1694 (*nn) = (*this); 1695 return nn; 1696 } 1697 1698 // Absorb any information from source. 1699 bool update_from(Node_Notes* source) { 1700 bool changed = false; 1701 if (source != NULL) { 1702 if (source->jvms() != NULL) { 1703 set_jvms(source->jvms()); 1704 changed = true; 1705 } 1706 } 1707 return changed; 1708 } 1709 }; 1710 1711 // Inlined accessors for Compile::node_nodes that require the preceding class: 1712 inline Node_Notes* 1713 Compile::locate_node_notes(GrowableArray<Node_Notes*>* arr, 1714 int idx, bool can_grow) { 1715 assert(idx >= 0, "oob"); 1716 int block_idx = (idx >> _log2_node_notes_block_size); 1717 int grow_by = (block_idx - (arr == NULL? 0: arr->length())); 1718 if (grow_by >= 0) { 1719 if (!can_grow) return NULL; 1720 grow_node_notes(arr, grow_by + 1); 1721 } 1722 if (arr == NULL) return NULL; 1723 // (Every element of arr is a sub-array of length _node_notes_block_size.) 1724 return arr->at(block_idx) + (idx & (_node_notes_block_size-1)); 1725 } 1726 1727 inline bool 1728 Compile::set_node_notes_at(int idx, Node_Notes* value) { 1729 if (value == NULL || value->is_clear()) 1730 return false; // nothing to write => write nothing 1731 Node_Notes* loc = locate_node_notes(_node_note_array, idx, true); 1732 assert(loc != NULL, ""); 1733 return loc->update_from(value); 1734 } 1735 1736 1737 //------------------------------TypeNode--------------------------------------- 1738 // Node with a Type constant. 1739 class TypeNode : public Node { 1740 protected: 1741 virtual uint hash() const; // Check the type 1742 virtual bool cmp( const Node &n ) const; 1743 virtual uint size_of() const; // Size is bigger 1744 const Type* const _type; 1745 public: 1746 void set_type(const Type* t) { 1747 assert(t != NULL, "sanity"); 1748 debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH); 1749 *(const Type**)&_type = t; // cast away const-ness 1750 // If this node is in the hash table, make sure it doesn't need a rehash. 1751 assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code"); 1752 } 1753 const Type* type() const { assert(_type != NULL, "sanity"); return _type; }; 1754 TypeNode( const Type *t, uint required ) : Node(required), _type(t) { 1755 init_class_id(Class_Type); 1756 } 1757 virtual const Type* Value(PhaseGVN* phase) const; 1758 virtual const Type *bottom_type() const; 1759 virtual uint ideal_reg() const; 1760 #ifndef PRODUCT 1761 virtual void dump_spec(outputStream *st) const; 1762 virtual void dump_compact_spec(outputStream *st) const; 1763 #endif 1764 }; 1765 1766 #endif // SHARE_OPTO_NODE_HPP