/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_NODE_HPP
#define SHARE_VM_OPTO_NODE_HPP

#include "libadt/port.hpp"
#include "libadt/vectset.hpp"
#include "opto/compile.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style


class AbstractLockNode;
class AddNode;
class AddPNode;
class AliasInfo;
class AllocateArrayNode;
class AllocateNode;
class Block;
class Block_Array;
class BoolNode;
class BoxLockNode;
class CMoveNode;
class CallDynamicJavaNode;
class CallJavaNode;
class CallLeafNode;
class CallNode;
class CallRuntimeNode;
class CallStaticJavaNode;
class CatchNode;
class CatchProjNode;
class CheckCastPPNode;
class ClearArrayNode;
class CmpNode;
class CodeBuffer;
class ConstraintCastNode;
class ConNode;
class CountedLoopNode;
class CountedLoopEndNode;
class DecodeNNode;
class EncodePNode;
class FastLockNode;
class FastUnlockNode;
class IfNode;
class InitializeNode;
class JVMState;
class JumpNode;
class JumpProjNode;
class LoadNode;
class LoadStoreNode;
class LockNode;
class LoopNode;
class MachCallDynamicJavaNode;
class MachCallJavaNode;
class MachCallLeafNode;
class MachCallNode;
class MachCallRuntimeNode;
class MachCallStaticJavaNode;
class MachIfNode;
class MachNode;
class MachNullCheckNode;
class MachReturnNode;
class MachSafePointNode;
class MachSpillCopyNode;
class MachTempNode;
class Matcher;
class MemBarNode;
class MemNode;
class MergeMemNode;
class MulNode;
class MultiNode;
class MultiBranchNode;
class NeverBranchNode;
class Node;
class Node_Array;
class Node_List;
class Node_Stack;
class NullCheckNode;
class OopMap;
class ParmNode;
class PCTableNode;
class PhaseCCP;
class PhaseGVN;
class PhaseIterGVN;
class PhaseRegAlloc;
class PhaseTransform;
class PhaseValues;
class PhiNode;
class Pipeline;
class ProjNode;
class RegMask;
class RegionNode;
class RootNode;
class SafePointNode;
class SafePointScalarObjectNode;
class StartNode;
class State;
class StoreNode;
class SubNode;
class Type;
class TypeNode;
class UnlockNode;
class VectorSet;
class IfTrueNode;
class IfFalseNode;
typedef void (*NFunc)(Node&,void*);
extern "C" {
  typedef int (*C_sort_func_t)(const void *, const void *);
}

// The type of all node counts and indexes.
// It must hold at least 16 bits, but must also be fast to load and store.
// This type, if less than 32 bits, could limit the number of possible nodes.
// (To make this type platform-specific, move to globalDefinitions_xxx.hpp.)
typedef unsigned int node_idx_t;


#ifndef OPTO_DU_ITERATOR_ASSERT
#ifdef ASSERT
#define OPTO_DU_ITERATOR_ASSERT 1
#else
#define OPTO_DU_ITERATOR_ASSERT 0
#endif
#endif //OPTO_DU_ITERATOR_ASSERT

#if OPTO_DU_ITERATOR_ASSERT
class DUIterator;
class DUIterator_Fast;
class DUIterator_Last;
#else
typedef uint   DUIterator;
typedef Node** DUIterator_Fast;
typedef Node** DUIterator_Last;
#endif

// Node Sentinel
#define NodeSentinel (Node*)-1

// Unknown count frequency
#define COUNT_UNKNOWN (-1.0f)

//------------------------------Node-------------------------------------------
// Nodes define actions in the program.  They create values, which have types.
// They are both vertices in a directed graph and program primitives.  Nodes
// are labeled; the label is the "opcode", the primitive function in the lambda
// calculus sense that gives meaning to the Node.  Node inputs are ordered (so
// that "a-b" is different from "b-a").  The inputs to a Node are the inputs to
// the Node's function.  These inputs also define a Type equation for the Node.
// Solving these Type equations amounts to doing dataflow analysis.
// Control and data are uniformly represented in the graph.  Finally, Nodes
// have a unique dense integer index which is used to index into side arrays
// whenever I have phase-specific information.

class Node {
  // Lots of restrictions on cloning Nodes
  Node(const Node&);            // not defined; linker error to use these
  Node &operator=(const Node &rhs);

public:
  friend class Compile;
#if OPTO_DU_ITERATOR_ASSERT
  friend class DUIterator_Common;
  friend class DUIterator;
  friend class DUIterator_Fast;
  friend class DUIterator_Last;
#endif

  // Because Nodes come and go, I define an Arena of Node structures to pull
  // from.  This should allow fast access to node creation & deletion.  This
  // field is a local cache of a value defined in some "program fragment" of
  // which these Nodes are just a part.

  // New operator that takes a Compile pointer; this will eventually
  // be the "new" new operator.
  inline void* operator new( size_t x, Compile* C) {
    Node* n = (Node*)C->node_arena()->Amalloc_D(x);
#ifdef ASSERT
    n->_in = (Node**)n; // magic cookie for assertion check
#endif
    n->_out = (Node**)C;
    return (void*)n;
  }

  // New operator that takes a Compile pointer and an edge count; this will
  // eventually be the "new" new operator.
  inline void* operator new( size_t x, Compile* C, int y) {
    Node* n = (Node*)C->node_arena()->Amalloc_D(x + y*sizeof(void*));
    n->_in = (Node**)(((char*)n) + x);
#ifdef ASSERT
    n->_in[y-1] = n; // magic cookie for assertion check
#endif
    n->_out = (Node**)C;
    return (void*)n;
  }

  // Delete is a NOP
  void operator delete( void *ptr ) {}
  // Fancy destructor; eagerly attempt to reclaim Node numberings and storage
  void destruct();

  // Create a new Node.  'required' is the number of inputs required for
  // semantic correctness.
  Node( uint required );

  // Create a new Node with given input edges.
  // This version requires use of the "edge-count" new.
  // E.g.  new (C,3) FooNode( C, NULL, left, right );
  Node( Node *n0 );
  Node( Node *n0, Node *n1 );
  Node( Node *n0, Node *n1, Node *n2 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4, Node *n5 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3,
        Node *n4, Node *n5, Node *n6 );

  // Clone an inherited Node given only the base Node type.
  Node* clone() const;

  // Clone a Node, immediately supplying one or two new edges.
  // The first and second arguments, if non-null, replace in(1) and in(2),
  // respectively.
  Node* clone_with_data_edge(Node* in1, Node* in2 = NULL) const {
    Node* nn = clone();
    if (in1 != NULL)  nn->set_req(1, in1);
    if (in2 != NULL)  nn->set_req(2, in2);
    return nn;
  }

private:
  // Shared setup for the above constructors.
  // Handles all interactions with Compile::current.
  // Puts initial values in all Node fields except _idx.
  // Returns the initial value for _idx, which cannot
  // be initialized by assignment.
  inline int Init(int req, Compile* C);

//----------------- input edge handling
protected:
  friend class PhaseCFG;        // Access to address of _in array elements
  Node **_in;                   // Array of use-def references to Nodes
  Node **_out;                  // Array of def-use references to Nodes

  // Input edges are split into two categories.  Required edges are required
  // for semantic correctness; order is important and NULLs are allowed.
  // Precedence edges are used to help determine execution order and are
  // added, e.g., for scheduling purposes.  They are unordered and not
  // duplicated; they have no embedded NULLs.  Edges from 0 to _cnt-1
  // are required, from _cnt to _max-1 are precedence edges.
  node_idx_t _cnt;              // Total number of required Node inputs.

  node_idx_t _max;              // Actual length of input array.

  // Output edges are an unordered list of def-use edges which exactly
  // correspond to required input edges which point from other nodes
  // to this one.  Thus the count of the output edges is the number of
  // users of this node.
  node_idx_t _outcnt;           // Total number of Node outputs.

  node_idx_t _outmax;           // Actual length of output array.

  // Grow the actual input array to the next larger power-of-2 bigger than len.
  void grow( uint len );
  // Grow the output array to the next larger power-of-2 bigger than len.
  void out_grow( uint len );

public:
  // Each Node is assigned a unique small/dense number.  This number is used
  // to index into auxiliary arrays of data and bitvectors.
  // It is declared const to defend against inadvertent assignment,
  // since it is used by clients as a naked field.
  const node_idx_t _idx;

  // Get the (read-only) number of input edges
  uint req() const { return _cnt; }
  uint len() const { return _max; }
  // Get the (read-only) number of output edges
  uint outcnt() const { return _outcnt; }

#if OPTO_DU_ITERATOR_ASSERT
  // Iterate over the out-edges of this node.  Deletions are illegal.
  inline DUIterator outs() const;
  // Use this when the out array might have changed to suppress asserts.
  inline DUIterator& refresh_out_pos(DUIterator& i) const;
  // Does the node have an out at this position?  (Used for iteration.)
  inline bool has_out(DUIterator& i) const;
  inline Node*    out(DUIterator& i) const;
  // Iterate over the out-edges of this node.  All changes are illegal.
  inline DUIterator_Fast fast_outs(DUIterator_Fast& max) const;
  inline Node*    fast_out(DUIterator_Fast& i) const;
  // Iterate over the out-edges of this node, deleting one at a time.
  inline DUIterator_Last last_outs(DUIterator_Last& min) const;
  inline Node*    last_out(DUIterator_Last& i) const;
  // The inline bodies of all these methods are after the iterator definitions.
#else
  // Iterate over the out-edges of this node.  Deletions are illegal.
  // This iteration uses integral indexes, to decouple from array reallocations.
  DUIterator outs() const  { return 0; }
  // Use this when the out array might have changed to suppress asserts.
  DUIterator refresh_out_pos(DUIterator i) const { return i; }

  // Reference to the i'th output Node.  Error if out of bounds.
  Node*    out(DUIterator i) const { assert(i < _outcnt, "oob"); return _out[i]; }
  // Does the node have an out at this position?  (Used for iteration.)
  bool has_out(DUIterator i) const { return i < _outcnt; }

  // Iterate over the out-edges of this node.  All changes are illegal.
  // This iteration uses a pointer internal to the out array.
  DUIterator_Fast fast_outs(DUIterator_Fast& max) const {
    Node** out = _out;
    // Assign a limit pointer to the reference argument:
    max = out + (ptrdiff_t)_outcnt;
    // Return the base pointer:
    return out;
  }
  Node*    fast_out(DUIterator_Fast i) const  { return *i; }
  // Iterate over the out-edges of this node, deleting one at a time.
  // This iteration uses a pointer internal to the out array.
  DUIterator_Last last_outs(DUIterator_Last& min) const {
    Node** out = _out;
    // Assign a limit pointer to the reference argument:
    min = out;
    // Return the pointer to the start of the iteration:
    return out + (ptrdiff_t)_outcnt - 1;
  }
  Node*    last_out(DUIterator_Last i) const  { return *i; }
#endif

  // Reference to the i'th input Node.  Error if out of bounds.
  Node* in(uint i) const { assert(i < _max, "oob"); return _in[i]; }
  // Reference to the i'th output Node.  Error if out of bounds.
  // Use this accessor sparingly.  We are trying to use iterators instead.
  Node* raw_out(uint i) const { assert(i < _outcnt, "oob"); return _out[i]; }
  // Return the unique out edge.
  Node* unique_out() const { assert(_outcnt == 1, "not unique"); return _out[0]; }
  // Delete out edge at position 'i' by moving last out edge to position 'i'
  void  raw_del_out(uint i) {
    assert(i < _outcnt, "oob");
    assert(_outcnt > 0, "oob");
    #if OPTO_DU_ITERATOR_ASSERT
    // Record that a change happened here.
    debug_only(_last_del = _out[i]; ++_del_tick);
    #endif
    _out[i] = _out[--_outcnt];
    // Smash the old edge so it can't be used accidentally.
    debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
  }

#ifdef ASSERT
  bool is_dead() const;
#define is_not_dead(n) ((n) == NULL || !VerifyIterativeGVN || !((n)->is_dead()))
#endif

  // Set a required input edge, also updates corresponding output edge
  void add_req( Node *n );            // Append a NEW required input
  void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n).
  void del_req( uint idx );           // Delete required edge & compact
  void ins_req( uint i, Node *n );    // Insert a NEW required input
  void set_req( uint i, Node *n ) {
    assert( is_not_dead(n), "can not use dead node");
    assert( i < _cnt, "oob");
    assert( !VerifyHashTableKeys || _hash_lock == 0,
            "remove node from hash table before modifying it");
    Node** p = &_in[i];    // cache this._in, across the del_out call
    if (*p != NULL)  (*p)->del_out((Node *)this);
    (*p) = n;
    if (n != NULL)      n->add_out((Node *)this);
  }
  // Light version of set_req() to init inputs after node creation.
  void init_req( uint i, Node *n ) {
    assert( i == 0 && this == n ||
            is_not_dead(n), "can not use dead node");
    assert( i < _cnt, "oob");
    assert( !VerifyHashTableKeys || _hash_lock == 0,
            "remove node from hash table before modifying it");
    assert( _in[i] == NULL, "sanity");
    _in[i] = n;
    if (n != NULL)      n->add_out((Node *)this);
  }
  // Find first occurrence of n among my edges:
  int find_edge(Node* n);
  int replace_edge(Node* old, Node* neww);
  // NULL out all inputs to eliminate incoming Def-Use edges.
  // Return the number of edges between 'n' and 'this'
  int  disconnect_inputs(Node *n);

  // Quickly, return true if and only if I am Compile::current()->top().
  bool is_top() const {
    assert((this == (Node*) Compile::current()->top()) == (_out == NULL), "");
    return (_out == NULL);
  }
  // Reaffirm invariants for is_top.  (Only from Compile::set_cached_top_node.)
  void setup_is_top();

  // Strip away casting.  (It is depth-limited.)
  Node* uncast() const;

private:
  static Node* uncast_helper(const Node* n);

  // Add an output edge to the end of the list
  void add_out( Node *n ) {
    if (is_top())  return;
    if( _outcnt == _outmax ) out_grow(_outcnt);
    _out[_outcnt++] = n;
  }
  // Delete an output edge
  void del_out( Node *n ) {
    if (is_top())  return;
    Node** outp = &_out[_outcnt];
    // Find and remove n
    do {
      assert(outp > _out, "Missing Def-Use edge");
    } while (*--outp != n);
    *outp = _out[--_outcnt];
    // Smash the old edge so it can't be used accidentally.
    debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
    // Record that a change happened here.
    #if OPTO_DU_ITERATOR_ASSERT
    debug_only(_last_del = n; ++_del_tick);
    #endif
  }

public:
  // Globally replace this node by a given new node, updating all uses.
  void replace_by(Node* new_node);
  // Globally replace this node by a given new node, updating all uses
  // and cutting input edges of old node.
  void subsume_by(Node* new_node) {
    replace_by(new_node);
    disconnect_inputs(NULL);
  }
  void set_req_X( uint i, Node *n, PhaseIterGVN *igvn );
  // Find the one non-null required input.  RegionNode only
  Node *nonnull_req() const;
  // Add or remove precedence edges
  void add_prec( Node *n );
  void rm_prec( uint i );
  void set_prec( uint i, Node *n ) {
    assert( is_not_dead(n), "can not use dead node");
    assert( i >= _cnt, "not a precedence edge");
    if (_in[i] != NULL) _in[i]->del_out((Node *)this);
    _in[i] = n;
    if (n != NULL) n->add_out((Node *)this);
  }
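
  // Illustrative sketch (added for clarity, not part of the original header;
  // the variable names are hypothetical).  A pass that rewires a required
  // input and records an ordering constraint might do:
  //
  //   use->set_req(1, new_def);   // replace required input 1; DU info updated
  //   use->add_prec(barrier);     // add an unordered precedence edge
  //
  // set_req() maintains both directions of the edge: it removes 'use' from
  // the old input's out-list and adds it to new_def's out-list.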
  // Set this node's index, used by cisc_version to replace current node
  void set_idx(uint new_idx) {
    const node_idx_t* ref = &_idx;
    *(node_idx_t*)ref = new_idx;
  }
  // Swap input edge order.  (Edge indexes i1 and i2 are usually 1 and 2.)
  void swap_edges(uint i1, uint i2) {
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    // Def-Use info is unchanged
    Node* n1 = in(i1);
    Node* n2 = in(i2);
    _in[i1] = n2;
    _in[i2] = n1;
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "edge swap must preserve hash code");
  }

  // Iterators over input Nodes for a Node X are written as:
  // for( i = 0; i < X.req(); i++ ) ... X[i] ...
  // NOTE: Required edges can contain embedded NULL pointers.

//----------------- Other Node Properties

  // Generate class id for some ideal nodes to avoid virtual query
  // methods is_<Node>().
  // Class id is the set of bits corresponding to the node class and all its
  // super classes, so that queries for super classes are also valid.
  // Subclasses of the same super class have different assigned bits
  // (the third parameter in the macro DEFINE_CLASS_ID).
  // Classes with deeper hierarchy are declared first.
  // Classes with the same hierarchy depth are sorted by usage frequency.
  //
  // The query method masks the bits to cut off bits of subclasses
  // and then compares the result with the class id
  // (see the macro DEFINE_CLASS_QUERY below).
  //
  //  Class_MachCall=30, ClassMask_MachCall=31
  // 12               8               4               0
  //  0   0   0   0   0   0   0   0   1   1   1   1   0
  //                                  |   |   |   |
  //                                  |   |   |   Bit_Mach=2
  //                                  |   |   Bit_MachReturn=4
  //                                  |   Bit_MachSafePoint=8
  //                                  Bit_MachCall=16
  //
  //  Class_CountedLoop=56, ClassMask_CountedLoop=63
  // 12               8               4               0
  //  0   0   0   0   0   0   0   1   1   1   0   0   0
  //                              |   |   |
  //                              |   |   Bit_Region=8
  //                              |   Bit_Loop=16
  //                              Bit_CountedLoop=32

  #define DEFINE_CLASS_ID(cl, supcl, subn) \
  Bit_##cl = (Class_##supcl == 0) ? 1 << subn : (Bit_##supcl) << (1 + subn) , \
  Class_##cl = Class_##supcl + Bit_##cl , \
  ClassMask_##cl = ((Bit_##cl << 1) - 1) ,
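
  // Worked example of the encoding above (added for clarity; the values
  // follow directly from the macro).  For the Mach hierarchy:
  //   Bit_Mach          = 1 << 1 = 2,   Class_Mach          = 2
  //   Bit_MachReturn    = 2 << 1 = 4,   Class_MachReturn    = 6
  //   Bit_MachSafePoint = 4 << 1 = 8,   Class_MachSafePoint = 14
  //   Bit_MachCall      = 8 << 1 = 16,  Class_MachCall      = 30
  // and ClassMask_MachCall = (16 << 1) - 1 = 31.  A MachCallStaticJavaNode
  // therefore answers true to is_Mach(), is_MachSafePoint() and is_MachCall(),
  // because masking its class id with each ClassMask reproduces the
  // corresponding Class_ value.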
  // This enum is used only for C2 ideal and mach nodes with is_<node>() methods
  // so that its values fit into 16 bits.
  enum NodeClasses {
    Bit_Node   = 0x0000,
    Class_Node = 0x0000,
    ClassMask_Node = 0xFFFF,

    DEFINE_CLASS_ID(Multi, Node, 0)
      DEFINE_CLASS_ID(SafePoint, Multi, 0)
        DEFINE_CLASS_ID(Call,      SafePoint, 0)
          DEFINE_CLASS_ID(CallJava,         Call, 0)
            DEFINE_CLASS_ID(CallStaticJava,   CallJava, 0)
            DEFINE_CLASS_ID(CallDynamicJava,  CallJava, 1)
          DEFINE_CLASS_ID(CallRuntime,      Call, 1)
            DEFINE_CLASS_ID(CallLeaf,         CallRuntime, 0)
          DEFINE_CLASS_ID(Allocate,         Call, 2)
            DEFINE_CLASS_ID(AllocateArray,    Allocate, 0)
          DEFINE_CLASS_ID(AbstractLock,     Call, 3)
            DEFINE_CLASS_ID(Lock,             AbstractLock, 0)
            DEFINE_CLASS_ID(Unlock,           AbstractLock, 1)
      DEFINE_CLASS_ID(MultiBranch, Multi, 1)
        DEFINE_CLASS_ID(PCTable,     MultiBranch, 0)
          DEFINE_CLASS_ID(Catch,       PCTable, 0)
          DEFINE_CLASS_ID(Jump,        PCTable, 1)
        DEFINE_CLASS_ID(If,          MultiBranch, 1)
          DEFINE_CLASS_ID(CountedLoopEnd, If, 0)
        DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2)
      DEFINE_CLASS_ID(Start,       Multi, 2)
      DEFINE_CLASS_ID(MemBar,      Multi, 3)
        DEFINE_CLASS_ID(Initialize,    MemBar, 0)

    DEFINE_CLASS_ID(Mach,  Node, 1)
      DEFINE_CLASS_ID(MachReturn, Mach, 0)
        DEFINE_CLASS_ID(MachSafePoint, MachReturn, 0)
          DEFINE_CLASS_ID(MachCall, MachSafePoint, 0)
            DEFINE_CLASS_ID(MachCallJava, MachCall, 0)
              DEFINE_CLASS_ID(MachCallStaticJava,  MachCallJava, 0)
              DEFINE_CLASS_ID(MachCallDynamicJava, MachCallJava, 1)
            DEFINE_CLASS_ID(MachCallRuntime, MachCall, 1)
              DEFINE_CLASS_ID(MachCallLeaf, MachCallRuntime, 0)
      DEFINE_CLASS_ID(MachSpillCopy, Mach, 1)
      DEFINE_CLASS_ID(MachNullCheck, Mach, 2)
      DEFINE_CLASS_ID(MachIf,        Mach, 3)
      DEFINE_CLASS_ID(MachTemp,      Mach, 4)

    DEFINE_CLASS_ID(Proj,  Node, 2)
      DEFINE_CLASS_ID(CatchProj, Proj, 0)
      DEFINE_CLASS_ID(JumpProj,  Proj, 1)
      DEFINE_CLASS_ID(IfTrue,    Proj, 2)
      DEFINE_CLASS_ID(IfFalse,   Proj, 3)
      DEFINE_CLASS_ID(Parm,      Proj, 4)

    DEFINE_CLASS_ID(Region, Node, 3)
      DEFINE_CLASS_ID(Loop, Region, 0)
        DEFINE_CLASS_ID(Root,        Loop, 0)
        DEFINE_CLASS_ID(CountedLoop, Loop, 1)

    DEFINE_CLASS_ID(Sub,   Node, 4)
      DEFINE_CLASS_ID(Cmp,   Sub, 0)
        DEFINE_CLASS_ID(FastLock,   Cmp, 0)
        DEFINE_CLASS_ID(FastUnlock, Cmp, 1)

    DEFINE_CLASS_ID(Type,  Node, 5)
      DEFINE_CLASS_ID(Phi,   Type, 0)
      DEFINE_CLASS_ID(ConstraintCast, Type, 1)
      DEFINE_CLASS_ID(CheckCastPP, Type, 2)
      DEFINE_CLASS_ID(CMove, Type, 3)
      DEFINE_CLASS_ID(SafePointScalarObject, Type, 4)
      DEFINE_CLASS_ID(DecodeN, Type, 5)
      DEFINE_CLASS_ID(EncodeP, Type, 6)

    DEFINE_CLASS_ID(Mem,   Node, 6)
      DEFINE_CLASS_ID(Load,  Mem, 0)
      DEFINE_CLASS_ID(Store, Mem, 1)
      DEFINE_CLASS_ID(LoadStore, Mem, 2)

    DEFINE_CLASS_ID(MergeMem, Node, 7)
    DEFINE_CLASS_ID(Bool, Node, 8)
    DEFINE_CLASS_ID(AddP, Node, 9)
    DEFINE_CLASS_ID(BoxLock, Node, 10)
    DEFINE_CLASS_ID(Add, Node, 11)
    DEFINE_CLASS_ID(Mul, Node, 12)
    DEFINE_CLASS_ID(ClearArray, Node, 13)

    _max_classes = ClassMask_ClearArray
  };
  #undef DEFINE_CLASS_ID

  // Flags are sorted by usage frequency.
  enum NodeFlags {
    Flag_is_Copy             = 0x01, // should be first bit to avoid shift
    Flag_is_Call             = Flag_is_Copy << 1,
    Flag_rematerialize       = Flag_is_Call << 1,
    Flag_needs_anti_dependence_check = Flag_rematerialize << 1,
    Flag_is_macro            = Flag_needs_anti_dependence_check << 1,
    Flag_is_Con              = Flag_is_macro << 1,
    Flag_is_cisc_alternate   = Flag_is_Con << 1,
    Flag_is_Branch           = Flag_is_cisc_alternate << 1,
    Flag_is_block_start      = Flag_is_Branch << 1,
    Flag_is_Goto             = Flag_is_block_start << 1,
    Flag_is_dead_loop_safe   = Flag_is_Goto << 1,
    Flag_may_be_short_branch = Flag_is_dead_loop_safe << 1,
    Flag_is_safepoint_node   = Flag_may_be_short_branch << 1,
    Flag_is_pc_relative      = Flag_is_safepoint_node << 1,
    Flag_is_Vector           = Flag_is_pc_relative << 1,
    _max_flags = (Flag_is_Vector << 1) - 1 // allow flags combination
  };

private:
  jushort _class_id;
  jushort _flags;

protected:
  // These methods should be called from constructors only.
  void init_class_id(jushort c) {
    assert(c <= _max_classes, "invalid node class");
    _class_id = c; // cast out const
  }
  void init_flags(jushort fl) {
    assert(fl <= _max_flags, "invalid node flag");
    _flags |= fl;
  }
  void clear_flag(jushort fl) {
    assert(fl <= _max_flags, "invalid node flag");
    _flags &= ~fl;
  }

public:
  const jushort class_id() const { return _class_id; }

  const jushort flags() const { return _flags; }

  // Return a dense integer opcode number
  virtual int Opcode() const;

  // Virtual inherited Node size
  virtual uint size_of() const;

  // Other interesting Node properties

  // Special case: is_Call() returns true for both CallNode and MachCallNode.
  bool is_Call() const {
    return (_flags & Flag_is_Call) != 0;
  }

  CallNode* isa_Call() const {
    return is_Call() ? as_Call() : NULL;
  }

  CallNode *as_Call() const { // Only for CallNode (not for MachCallNode)
    assert((_class_id & ClassMask_Call) == Class_Call, "invalid node class");
    return (CallNode*)this;
  }

  #define DEFINE_CLASS_QUERY(type)                           \
  bool is_##type() const {                                   \
    return ((_class_id & ClassMask_##type) == Class_##type); \
  }                                                          \
  type##Node *as_##type() const {                            \
    assert(is_##type(), "invalid node class");               \
    return (type##Node*)this;                                \
  }                                                          \
  type##Node* isa_##type() const {                           \
    return (is_##type()) ? as_##type() : NULL;               \
  }
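
  // Illustrative use of the generated queries (added for clarity; the node
  // variable is a hypothetical placeholder).  DEFINE_CLASS_QUERY(If), for
  // example, produces is_If(), as_If() and isa_If():
  //
  //   if (n->is_If()) {                  // cheap class-id test, no virtual call
  //     IfNode* iff = n->as_If();        // checked downcast (asserts in debug)
  //     ...
  //   }
  //   IfNode* maybe_iff = n->isa_If();   // downcast, or NULL if wrong class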

  DEFINE_CLASS_QUERY(AbstractLock)
  DEFINE_CLASS_QUERY(Add)
  DEFINE_CLASS_QUERY(AddP)
  DEFINE_CLASS_QUERY(Allocate)
  DEFINE_CLASS_QUERY(AllocateArray)
  DEFINE_CLASS_QUERY(Bool)
  DEFINE_CLASS_QUERY(BoxLock)
  DEFINE_CLASS_QUERY(CallDynamicJava)
  DEFINE_CLASS_QUERY(CallJava)
  DEFINE_CLASS_QUERY(CallLeaf)
  DEFINE_CLASS_QUERY(CallRuntime)
  DEFINE_CLASS_QUERY(CallStaticJava)
  DEFINE_CLASS_QUERY(Catch)
  DEFINE_CLASS_QUERY(CatchProj)
  DEFINE_CLASS_QUERY(CheckCastPP)
  DEFINE_CLASS_QUERY(ConstraintCast)
  DEFINE_CLASS_QUERY(ClearArray)
  DEFINE_CLASS_QUERY(CMove)
  DEFINE_CLASS_QUERY(Cmp)
  DEFINE_CLASS_QUERY(CountedLoop)
  DEFINE_CLASS_QUERY(CountedLoopEnd)
  DEFINE_CLASS_QUERY(DecodeN)
  DEFINE_CLASS_QUERY(EncodeP)
  DEFINE_CLASS_QUERY(FastLock)
  DEFINE_CLASS_QUERY(FastUnlock)
  DEFINE_CLASS_QUERY(If)
  DEFINE_CLASS_QUERY(IfFalse)
  DEFINE_CLASS_QUERY(IfTrue)
  DEFINE_CLASS_QUERY(Initialize)
  DEFINE_CLASS_QUERY(Jump)
  DEFINE_CLASS_QUERY(JumpProj)
  DEFINE_CLASS_QUERY(Load)
  DEFINE_CLASS_QUERY(LoadStore)
  DEFINE_CLASS_QUERY(Lock)
  DEFINE_CLASS_QUERY(Loop)
  DEFINE_CLASS_QUERY(Mach)
  DEFINE_CLASS_QUERY(MachCall)
  DEFINE_CLASS_QUERY(MachCallDynamicJava)
  DEFINE_CLASS_QUERY(MachCallJava)
  DEFINE_CLASS_QUERY(MachCallLeaf)
  DEFINE_CLASS_QUERY(MachCallRuntime)
  DEFINE_CLASS_QUERY(MachCallStaticJava)
  DEFINE_CLASS_QUERY(MachIf)
  DEFINE_CLASS_QUERY(MachNullCheck)
  DEFINE_CLASS_QUERY(MachReturn)
  DEFINE_CLASS_QUERY(MachSafePoint)
  DEFINE_CLASS_QUERY(MachSpillCopy)
  DEFINE_CLASS_QUERY(MachTemp)
  DEFINE_CLASS_QUERY(Mem)
  DEFINE_CLASS_QUERY(MemBar)
  DEFINE_CLASS_QUERY(MergeMem)
  DEFINE_CLASS_QUERY(Mul)
  DEFINE_CLASS_QUERY(Multi)
  DEFINE_CLASS_QUERY(MultiBranch)
  DEFINE_CLASS_QUERY(Parm)
  DEFINE_CLASS_QUERY(PCTable)
  DEFINE_CLASS_QUERY(Phi)
  DEFINE_CLASS_QUERY(Proj)
  DEFINE_CLASS_QUERY(Region)
  DEFINE_CLASS_QUERY(Root)
  DEFINE_CLASS_QUERY(SafePoint)
  DEFINE_CLASS_QUERY(SafePointScalarObject)
  DEFINE_CLASS_QUERY(Start)
  DEFINE_CLASS_QUERY(Store)
  DEFINE_CLASS_QUERY(Sub)
  DEFINE_CLASS_QUERY(Type)
  DEFINE_CLASS_QUERY(Unlock)

  #undef DEFINE_CLASS_QUERY

  // duplicate of is_MachSpillCopy()
  bool is_SpillCopy () const {
    return ((_class_id & ClassMask_MachSpillCopy) == Class_MachSpillCopy);
  }

  bool is_Con () const { return (_flags & Flag_is_Con) != 0; }
  bool is_Goto() const { return (_flags & Flag_is_Goto) != 0; }
  // The data node which is safe to leave in dead loop during IGVN optimization.
  bool is_dead_loop_safe() const {
    return is_Phi() || (is_Proj() && in(0) == NULL) ||
           ((_flags & (Flag_is_dead_loop_safe | Flag_is_Con)) != 0 &&
            (!is_Proj() || !in(0)->is_Allocate()));
  }

  // is_Copy() returns copied edge index (0 or 1)
  uint is_Copy() const { return (_flags & Flag_is_Copy); }

  virtual bool is_CFG() const { return false; }

  // If this node is control-dependent on a test, can it be
  // rerouted to a dominating equivalent test?  This is usually
  // true of non-CFG nodes, but can be false for operations which
  // depend for their correct sequencing on more than one test.
  // (In that case, hoisting to a dominating test may silently
  // skip some other important test.)
  virtual bool depends_only_on_test() const { assert(!is_CFG(), ""); return true; };

  // defined for MachNodes that match 'If' | 'Goto' | 'CountedLoopEnd'
  bool is_Branch() const { return (_flags & Flag_is_Branch) != 0; }

  // When building basic blocks, I need to have a notion of block beginning
  // Nodes, next block selector Nodes (block enders), and next block
  // projections.  These calls need to work on their machine equivalents.  The
  // Ideal beginning Nodes are RootNode, RegionNode and StartNode.
  bool is_block_start() const {
    if ( is_Region() )
      return this == (const Node*)in(0);
    else
      return (_flags & Flag_is_block_start) != 0;
  }

  // The Ideal control projection Nodes are IfTrue/IfFalse, JumpProjNode, Root,
  // Goto and Return.  This call also returns the block ending Node.
  virtual const Node *is_block_proj() const;

  // The node is a "macro" node which needs to be expanded before matching
  bool is_macro() const { return (_flags & Flag_is_macro) != 0; }

  // Value is a vector of primitive values
  bool is_Vector() const { return (_flags & Flag_is_Vector) != 0; }

//----------------- Optimization

  // Get the worst-case Type output for this Node.
  virtual const class Type *bottom_type() const;

  // If we find a better type for a node, try to record it permanently.
  // Return true if this node actually changed.
  // Be sure to do the hash_delete game in the "rehash" variant.
  void raise_bottom_type(const Type* new_type);

  // Get the address type with which this node uses and/or defs memory,
  // or NULL if none.  The address type is conservatively wide.
  // Returns non-null for calls, membars, loads, stores, etc.
  // Returns TypePtr::BOTTOM if the node touches memory "broadly".
  virtual const class TypePtr *adr_type() const { return NULL; }

  // Return an existing node which computes the same function as this node.
  // The optimistic combined algorithm requires this to return a Node which
  // is a small number of steps away (e.g., one of my inputs).
  virtual Node *Identity( PhaseTransform *phase );

  // Return the set of values this Node can take on at runtime.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Return a node which is more "ideal" than the current node.
  // The invariants on this call are subtle.  If in doubt, read the
  // treatise in node.cpp above the default implementation AND TEST WITH
  // +VerifyIterativeGVN!
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
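
  // Illustrative sketch of how a subclass typically overrides Identity()
  // (added for clarity; "FooAddNode" is a hypothetical node class, not part
  // of this header).  Identity() returns an existing, equivalent node when
  // one is cheaply available, e.g. folding "x + 0" to "x":
  //
  //   Node* FooAddNode::Identity(PhaseTransform* phase) {
  //     if (phase->type(in(2)) == TypeInt::ZERO)  return in(1);  // x + 0 => x
  //     return this;   // no identity found; keep this node
  //   }
  //
  // Value() and Ideal() follow the same pattern: Value() computes a (possibly
  // narrower) Type for this node, and Ideal() may return a rewritten subgraph.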

  // Some nodes have specific Ideal subgraph transformations only if they are
  // unique users of specific nodes.  Such nodes should be put on IGVN worklist
  // for the transformations to happen.
  bool has_special_unique_user() const;

  // Skip Proj and CatchProj nodes chains.  Check for Null and Top.
  Node* find_exact_control(Node* ctrl);

  // Check if 'this' node dominates or is equal to 'sub'.
  bool dominates(Node* sub, Node_List &nlist);

protected:
  bool remove_dead_region(PhaseGVN *phase, bool can_reshape);
public:

  // Idealize graph, using DU info.  Done after constant propagation
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );

  // See if there is valid pipeline info
  static  const Pipeline *pipeline_class();
  virtual const Pipeline *pipeline() const;

  // Compute the latency from the def to this instruction of the ith input node
  uint latency(uint i);

  // Hash & compare functions, for pessimistic value numbering

  // If the hash function returns the special sentinel value NO_HASH,
  // the node is guaranteed never to compare equal to any other node.
  // If we accidentally generate a hash with value NO_HASH the node
  // won't go into the table and we'll lose a little optimization.
  enum { NO_HASH = 0 };
  virtual uint hash() const;
  virtual uint cmp( const Node &n ) const;

  // Operation appears to be iteratively computed (such as an induction variable)
  // It is possible for this operation to return false for a loop-varying
  // value, if it appears (by local graph inspection) to be computed by a simple conditional.
  bool is_iteratively_computed();

  // Determine if a node is a counted-loop induction variable.
  // The method is defined in loopnode.cpp.
  const Node* is_loop_iv() const;

  // Return a node with opcode "opc" and same inputs as "this" if one can
  // be found; otherwise return NULL.
  Node* find_similar(int opc);

  // Return the unique control out if only one.  Null if none or more than one.
  Node* unique_ctrl_out();

//----------------- Code Generation

  // Ideal register class for Matching.  Zero means unmatched instruction
  // (these are cloned instead of converted to machine nodes).
  virtual uint ideal_reg() const;

  static const uint NotAMachineReg;   // must be > max. machine register

  // Do we Match on this edge index or not?  Generally false for Control
  // and true for everything else.  Weird for calls & returns.
  virtual uint match_edge(uint idx) const;

  // Register class output is returned in
  virtual const RegMask &out_RegMask() const;
  // Register class input is expected in
  virtual const RegMask &in_RegMask(uint) const;
  // Should we clone rather than spill this instruction?
  bool rematerialize() const;

  // Return JVM State Object if this Node carries debug info, or NULL otherwise
  virtual JVMState* jvms() const;

  // Print as assembly
  virtual void format( PhaseRegAlloc *, outputStream* st = tty ) const;
  // Emit bytes starting at parameter 'ptr'
  // Bump 'ptr' by the number of output bytes
  virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
  // Size of instruction in bytes
  virtual uint size(PhaseRegAlloc *ra_) const;

  // Convenience function to extract an integer constant from a node.
  // If it is not an integer constant (either Con, CastII, or Mach),
  // return value_if_unknown.
  jint find_int_con(jint value_if_unknown) const {
    const TypeInt* t = find_int_type();
    return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
  }
  // Return the constant, knowing it is an integer constant already
  jint get_int() const {
    const TypeInt* t = find_int_type();
    guarantee(t != NULL, "must be con");
    return t->get_con();
  }
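
  // Illustrative use of the constant helpers (added for clarity; the node
  // name is a hypothetical placeholder).  A transformation that needs the
  // value of, say, a shift count and wants a safe fallback when it is not a
  // compile-time constant:
  //
  //   jint shift = shift_node->find_int_con(-1);
  //   if (shift == -1) { /* not a constant; bail out */ }
  //
  // get_int(), by contrast, asserts that the node really is an int constant.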
  // Here's where the work is done.  Can produce non-constant int types too.
  const TypeInt* find_int_type() const;

  // Same thing for long (and intptr_t, via type.hpp):
  jlong get_long() const {
    const TypeLong* t = find_long_type();
    guarantee(t != NULL, "must be con");
    return t->get_con();
  }
  jlong find_long_con(jlong value_if_unknown) const {
    const TypeLong* t = find_long_type();
    return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
  }
  const TypeLong* find_long_type() const;

  // These guys are called by code generated by ADLC:
  intptr_t get_ptr() const;
  intptr_t get_narrowcon() const;
  jdouble getd() const;
  jfloat getf() const;

  // Nodes which are pinned into basic blocks
  virtual bool pinned() const { return false; }

  // Nodes which use memory without consuming it, hence need antidependences
  // More specifically, needs_anti_dependence_check returns true iff the node
  // (a) does a load, and (b) does not perform a store (except perhaps to a
  // stack slot or some other unaliased location).
  bool needs_anti_dependence_check() const;

  // Return which operand this instruction may cisc-spill. In other words,
  // return operand position that can convert from reg to memory access
  virtual int cisc_operand() const { return AdlcVMDeps::Not_cisc_spillable; }
  bool is_cisc_alternate() const { return (_flags & Flag_is_cisc_alternate) != 0; }

//----------------- Graph walking
public:
  // Walk and apply member functions recursively.
  // Supplied (this) pointer is root.
  void walk(NFunc pre, NFunc post, void *env);
  static void nop(Node &, void*); // Dummy empty function
  static void packregion( Node &n, void* );
private:
  void walk_(NFunc pre, NFunc post, void *env, VectorSet &visited);

//----------------- Printing, etc
public:
#ifndef PRODUCT
  Node* find(int idx) const;         // Search the graph for the given idx.
  Node* find_ctrl(int idx) const;    // Search control ancestors for the given idx.
  void dump() const;                 // Print this node,
  void dump(int depth) const;        // Print this node, recursively to depth d
  void dump_ctrl(int depth) const;   // Print control nodes, to depth d
  virtual void dump_req() const;     // Print required-edge info
  virtual void dump_prec() const;    // Print precedence-edge info
  virtual void dump_out() const;     // Print the output edge info
  virtual void dump_spec(outputStream *st) const {}; // Print per-node info
  void verify_edges(Unique_Node_List &visited); // Verify bi-directional edges
  void verify() const;               // Check Def-Use info for my subgraph
  static void verify_recur(const Node *n, int verify_depth, VectorSet &old_space, VectorSet &new_space);

  // This call defines a class-unique string used to identify class instances
  virtual const char *Name() const;

  void dump_format(PhaseRegAlloc *ra) const; // debug access to MachNode::format(...)
  // RegMask Print Functions
  void dump_in_regmask(int idx) { in_RegMask(idx).dump(); }
  void dump_out_regmask() { out_RegMask().dump(); }
  static int _in_dump_cnt;
  static bool in_dump() { return _in_dump_cnt > 0; }
  void fast_dump() const {
    tty->print("%4d: %-17s", _idx, Name());
    for (uint i = 0; i < len(); i++)
      if (in(i))
        tty->print(" %4d", in(i)->_idx);
      else
        tty->print(" NULL");
    tty->print("\n");
  }
#endif
#ifdef ASSERT
  void verify_construction();
  bool verify_jvms(const JVMState* jvms) const;
  int  _debug_idx;                     // Unique value assigned to every node.
  int   debug_idx() const              { return _debug_idx; }
  void  set_debug_idx( int debug_idx ) { _debug_idx = debug_idx; }

  Node* _debug_orig;                   // Original version of this, if any.
  Node*  debug_orig() const            { return _debug_orig; }
  void   set_debug_orig(Node* orig);   // _debug_orig = orig

  int        _hash_lock;               // Barrier to modifications of nodes in the hash table
  void  enter_hash_lock() { ++_hash_lock; assert(_hash_lock < 99, "in too many hash tables?"); }
  void   exit_hash_lock() { --_hash_lock; assert(_hash_lock >= 0, "mispaired hash locks"); }

  static void init_NodeProperty();

  #if OPTO_DU_ITERATOR_ASSERT
  const Node* _last_del;               // The last deleted node.
  uint        _del_tick;               // Bumped when a deletion happens.
  #endif
#endif
};

//-----------------------------------------------------------------------------
// Iterators over DU info, and associated Node functions.

#if OPTO_DU_ITERATOR_ASSERT

// Common code for assertion checking on DU iterators.
class DUIterator_Common VALUE_OBJ_CLASS_SPEC {
#ifdef ASSERT
 protected:
  bool         _vdui;               // cached value of VerifyDUIterators
  const Node*  _node;               // the node containing the _out array
  uint         _outcnt;             // cached node->_outcnt
  uint         _del_tick;           // cached node->_del_tick
  Node*        _last;               // last value produced by the iterator

  void sample(const Node* node);    // used by c'tor to set up for verifies
  void verify(const Node* node, bool at_end_ok = false);
  void verify_resync();
  void reset(const DUIterator_Common& that);

// The VDUI_ONLY macro protects code conditionalized on VerifyDUIterators
  #define I_VDUI_ONLY(i,x) { if ((i)._vdui) { x; } }
#else
  #define I_VDUI_ONLY(i,x) { }
#endif //ASSERT
};

#define VDUI_ONLY(x) I_VDUI_ONLY(*this, x)

// Default DU iterator.  Allows appends onto the out array.
// Allows deletion from the out array only at the current point.
// Usage:
//  for (DUIterator i = x->outs(); x->has_out(i); i++) {
//    Node* y = x->out(i);
//    ...
//  }
// Compiles in product mode to an unsigned integer index, which indexes
// into a repeatedly reloaded base pointer of x->_out.  The loop predicate
// also reloads x->_outcnt.  If you delete, you must perform "--i" just
// before continuing the loop.  You must delete only the last-produced
// edge.  You must delete only a single copy of the last-produced edge,
// or else you must delete all copies at once (the first time the edge
// is produced by the iterator).
class DUIterator : public DUIterator_Common {
  friend class Node;

  // This is the index which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI index is done to this index.
  // All other fields in this class are used only for assertion checking.
  uint         _idx;

  #ifdef ASSERT
  uint         _refresh_tick;       // Records the refresh activity.

  void sample(const Node* node);    // Initialize _refresh_tick etc.
  void verify(const Node* node, bool at_end_ok = false);
  void verify_increment();          // Verify an increment operation.
  void verify_resync();             // Verify that we can back up over a deletion.
  void verify_finish();             // Verify that the loop terminated properly.
  void refresh();                   // Resample verification info.
  void reset(const DUIterator& that);  // Resample after assignment.
  #endif

  DUIterator(const Node* node, int dummy_to_avoid_conversion)
    { _idx = 0;                         debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator()
    { /*initialize to garbage*/         debug_only(_vdui = false); }

  void operator++(int dummy_to_specify_postfix_op)
    { _idx++;                           VDUI_ONLY(verify_increment()); }

  void operator--()
    { VDUI_ONLY(verify_resync());       --_idx; }

  ~DUIterator()
    { VDUI_ONLY(verify_finish()); }

  void operator=(const DUIterator& that)
    { _idx = that._idx;                 debug_only(reset(that)); }
};

DUIterator Node::outs() const
  { return DUIterator(this, 0); }
DUIterator& Node::refresh_out_pos(DUIterator& i) const
  { I_VDUI_ONLY(i, i.refresh());        return i; }
bool Node::has_out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this,true));return i._idx < _outcnt; }
Node*    Node::out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this));     return debug_only(i._last=) _out[i._idx]; }


// Faster DU iterator.  Disallows insertions into the out array.
// Allows deletion from the out array only at the current point.
// Usage:
//  for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
//    Node* y = x->fast_out(i);
//    ...
//  }
// Compiles in product mode to raw Node** pointer arithmetic, with
// no reloading of pointers from the original node x.  If you delete,
// you must perform "--i; --imax" just before continuing the loop.
// If you delete multiple copies of the same edge, you must decrement
// imax, but not i, multiple times:  "--i, imax -= num_edges".
class DUIterator_Fast : public DUIterator_Common {
  friend class Node;
  friend class DUIterator_Last;

  // This is the pointer which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI pointer is done to this pointer.  All other fields in
  // this class are used only for assertion checking.
  Node**       _outp;

  #ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_resync();
  void verify_relimit(uint n);
  void reset(const DUIterator_Fast& that);
  #endif

  // Note:  offset must be signed, since -1 is sometimes passed
  DUIterator_Fast(const Node* node, ptrdiff_t offset)
    { _outp = node->_out + offset;      debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator_Fast()
    { /*initialize to garbage*/         debug_only(_vdui = false); }

  void operator++(int dummy_to_specify_postfix_op)
    { _outp++;                          VDUI_ONLY(verify(_node, true)); }

  void operator--()
    { VDUI_ONLY(verify_resync());       --_outp; }

  void operator-=(uint n)   // applied to the limit only
    { _outp -= n;                       VDUI_ONLY(verify_relimit(n)); }

  bool operator<(DUIterator_Fast& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp < limit._outp;
  }

  void operator=(const DUIterator_Fast& that)
    { _outp = that._outp;               debug_only(reset(that)); }
};

DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const {
  // Assign a limit pointer to the reference argument:
  imax = DUIterator_Fast(this, (ptrdiff_t)_outcnt);
  // Return the base pointer:
  return DUIterator_Fast(this, 0);
}
Node* Node::fast_out(DUIterator_Fast& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}
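
// Illustrative read-only use of the fast iterator (added for clarity; the
// variables are hypothetical).  Counting the Load users of a node that
// produces memory:
//
//   uint loads = 0;
//   for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
//     if (mem->fast_out(i)->is_Load())  loads++;
//   }
//
// If an edge is deleted inside such a loop, "--i; --imax" must be executed
// before continuing, as described above.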

// Faster DU iterator.  Requires each successive edge to be removed.
// Does not allow insertion of any edges.
// Usage:
//  for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= num_edges) {
//    Node* y = x->last_out(i);
//    ...
//  }
// Compiles in product mode to raw Node** pointer arithmetic, with
// no reloading of pointers from the original node x.
class DUIterator_Last : private DUIterator_Fast {
  friend class Node;

  #ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_step(uint num_edges);
  #endif

  // Note:  offset must be signed, since -1 is sometimes passed
  DUIterator_Last(const Node* node, ptrdiff_t offset)
    : DUIterator_Fast(node, offset) { }

  void operator++(int dummy_to_specify_postfix_op) {} // do not use
  void operator<(int)                              {} // do not use

 public:
  DUIterator_Last() { }
  // initialize to garbage

  void operator--()
    { _outp--;              VDUI_ONLY(verify_step(1)); }

  void operator-=(uint n)
    { _outp -= n;           VDUI_ONLY(verify_step(n)); }

  bool operator>=(DUIterator_Last& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp >= limit._outp;
  }

  void operator=(const DUIterator_Last& that)
    { DUIterator_Fast::operator=(that); }
};

DUIterator_Last Node::last_outs(DUIterator_Last& imin) const {
  // Assign a limit pointer to the reference argument:
  imin = DUIterator_Last(this, 0);
  // Return the initial pointer:
  return DUIterator_Last(this, (ptrdiff_t)_outcnt - 1);
}
Node* Node::last_out(DUIterator_Last& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}

#endif //OPTO_DU_ITERATOR_ASSERT

#undef I_VDUI_ONLY
#undef VDUI_ONLY

// An Iterator that truly follows the iterator pattern.  Doesn't
// support deletion but could be made to.
//
//   for (SimpleDUIterator i(n); i.has_next(); i.next()) {
//     Node* m = i.get();
//     ...
//   }
class SimpleDUIterator : public StackObj {
 private:
  Node* node;
  DUIterator_Fast i;
  DUIterator_Fast imax;
 public:
  SimpleDUIterator(Node* n): node(n), i(n->fast_outs(imax)) {}
  bool has_next() { return i < imax; }
  void next() { i++; }
  Node* get() { return node->fast_out(i); }
};


//-----------------------------------------------------------------------------
// Map dense integer indices to Nodes.  Uses classic doubling-array trick.
// Abstractly provides an infinite array of Node*'s, initialized to NULL.
// Note that the constructor just zeros things, and since I use Arena
// allocation I do not need a destructor to reclaim storage.
class Node_Array : public ResourceObj {
 protected:
  Arena *_a;                    // Arena to allocate in
  uint   _max;
  Node **_nodes;
  void   grow( uint i );        // Grow array node to fit
 public:
  Node_Array(Arena *a) : _a(a), _max(OptoNodeListSize) {
    _nodes = NEW_ARENA_ARRAY( a, Node *, OptoNodeListSize );
    for( int i = 0; i < OptoNodeListSize; i++ ) {
      _nodes[i] = NULL;
    }
  }

  Node_Array(Node_Array *na) : _a(na->_a), _max(na->_max), _nodes(na->_nodes) {}
  Node *operator[] ( uint i ) const // Lookup, or NULL for not mapped
  { return (i<_max) ? _nodes[i] : (Node*)NULL; }
  Node *at( uint i ) const { assert(i<_max,"oob"); return _nodes[i]; }
  Node **adr() { return _nodes; }
  // Extend the mapping: index i maps to Node *n.
  void map( uint i, Node *n ) { if( i>=_max ) grow(i); _nodes[i] = n; }
  void insert( uint i, Node *n );
  void remove( uint i );        // Remove, preserving order
  void sort( C_sort_func_t func);
  void reset( Arena *new_a );   // Zap mapping to empty; reclaim storage
  void clear();                 // Set all entries to NULL, keep storage
  uint Size() const { return _max; }
  void dump() const;
};

class Node_List : public Node_Array {
  uint _cnt;
 public:
  Node_List() : Node_Array(Thread::current()->resource_area()), _cnt(0) {}
  Node_List(Arena *a) : Node_Array(a), _cnt(0) {}
  bool contains(Node* n) {
    for (uint e = 0; e < size(); e++) {
      if (at(e) == n) return true;
    }
    return false;
  }
  void insert( uint i, Node *n ) { Node_Array::insert(i,n); _cnt++; }
  void remove( uint i ) { Node_Array::remove(i); _cnt--; }
  void push( Node *b ) { map(_cnt++,b); }
  void yank( Node *n );         // Find and remove
  Node *pop() { return _nodes[--_cnt]; }
  Node *rpop() { Node *b = _nodes[0]; _nodes[0]=_nodes[--_cnt]; return b;}
  void clear() { _cnt = 0; Node_Array::clear(); } // retain storage
  uint size() const { return _cnt; }
  void dump() const;
};

//------------------------------Unique_Node_List-------------------------------
class Unique_Node_List : public Node_List {
  VectorSet _in_worklist;
  uint _clock_index;            // Index in list where to pop from next
 public:
  Unique_Node_List() : Node_List(), _in_worklist(Thread::current()->resource_area()), _clock_index(0) {}
  Unique_Node_List(Arena *a) : Node_List(a), _in_worklist(a), _clock_index(0) {}

  void remove( Node *n );
  bool member( Node *n ) { return _in_worklist.test(n->_idx) != 0; }
  VectorSet &member_set(){ return _in_worklist; }

  void push( Node *b ) {
    if( !_in_worklist.test_set(b->_idx) )
      Node_List::push(b);
  }
  Node *pop() {
    if( _clock_index >= size() ) _clock_index = 0;
    Node *b = at(_clock_index);
    map( _clock_index, Node_List::pop());
    if (size() != 0) _clock_index++; // Always start from 0
    _in_worklist >>= b->_idx;
    return b;
  }
  Node *remove( uint i ) {
    Node *b = Node_List::at(i);
    _in_worklist >>= b->_idx;
    map(i,Node_List::pop());
    return b;
  }
  void yank( Node *n ) { _in_worklist >>= n->_idx; Node_List::yank(n); }
  void  clear() {
    _in_worklist.Clear();        // Discards storage but grows automatically
    Node_List::clear();
    _clock_index = 0;
  }

  // Used after parsing to remove useless nodes before Iterative GVN
  void remove_useless_nodes(VectorSet &useful);

#ifndef PRODUCT
  void print_set() const { _in_worklist.print(); }
#endif
};

// Inline definition of Compile::record_for_igvn must be deferred to this point.
inline void Compile::record_for_igvn(Node* n) {
  _for_igvn->push(n);
}
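
// Illustrative worklist pattern built on Unique_Node_List (added for clarity;
// the starting node is a hypothetical placeholder).  The embedded VectorSet
// filters duplicates, so each node is enqueued at most once:
//
//   Unique_Node_List worklist;
//   worklist.push(start_node);
//   for (uint i = 0; i < worklist.size(); i++) {
//     Node* n = worklist.at(i);
//     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
//       worklist.push(n->fast_out(j));   // ignored if already pushed
//     }
//   }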

//------------------------------Node_Stack-------------------------------------
class Node_Stack {
 protected:
  struct INode {
    Node *node; // Processed node
    uint  indx; // Index of next node's child
  };
  INode *_inode_top; // tos, stack grows up
  INode *_inode_max; // End of _inodes == _inodes + _max
  INode *_inodes;    // Array storage for the stack
  Arena *_a;         // Arena to allocate in
  void grow();
 public:
  Node_Stack(int size) {
    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
    _a = Thread::current()->resource_area();
    _inodes = NEW_ARENA_ARRAY( _a, INode, max );
    _inode_max = _inodes + max;
    _inode_top = _inodes - 1; // stack is empty
  }

  Node_Stack(Arena *a, int size) : _a(a) {
    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
    _inodes = NEW_ARENA_ARRAY( _a, INode, max );
    _inode_max = _inodes + max;
    _inode_top = _inodes - 1; // stack is empty
  }

  void pop() {
    assert(_inode_top >= _inodes, "node stack underflow");
    --_inode_top;
  }
  void push(Node *n, uint i) {
    ++_inode_top;
    if (_inode_top >= _inode_max) grow();
    INode *top = _inode_top; // optimization
    top->node = n;
    top->indx = i;
  }
  Node *node() const {
    return _inode_top->node;
  }
  Node* node_at(uint i) const {
    assert(_inodes + i <= _inode_top, "in range");
    return _inodes[i].node;
  }
  uint index() const {
    return _inode_top->indx;
  }
  uint index_at(uint i) const {
    assert(_inodes + i <= _inode_top, "in range");
    return _inodes[i].indx;
  }
  void set_node(Node *n) {
    _inode_top->node = n;
  }
  void set_index(uint i) {
    _inode_top->indx = i;
  }
  uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes,  sizeof(INode)); } // Max size
  uint size() const { return (uint)pointer_delta((_inode_top+1), _inodes,  sizeof(INode)); } // Current size
  bool is_nonempty() const { return (_inode_top >= _inodes); }
  bool is_empty() const { return (_inode_top < _inodes); }
  void clear() { _inode_top = _inodes - 1; } // retain storage
};
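
// Illustrative use of Node_Stack for a non-recursive walk over required
// inputs (added for clarity; the root variable is a hypothetical placeholder).
// The per-entry index remembers which child to visit next.  A real traversal
// would also track visited nodes (e.g. with a VectorSet), since the graph can
// contain cycles:
//
//   Node_Stack stack(16);
//   stack.push(root, 0);
//   while (stack.is_nonempty()) {
//     Node* n   = stack.node();
//     uint  idx = stack.index();
//     if (idx < n->req()) {
//       stack.set_index(idx + 1);
//       Node* in = n->in(idx);
//       if (in != NULL)  stack.push(in, 0);   // descend into this input
//     } else {
//       stack.pop();                          // all inputs of n processed
//     }
//   }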


//-----------------------------Node_Notes--------------------------------------
// Debugging or profiling annotations loosely and sparsely associated
// with some nodes.  See Compile::node_notes_at for the accessor.
class Node_Notes VALUE_OBJ_CLASS_SPEC {
  JVMState* _jvms;

public:
  Node_Notes(JVMState* jvms = NULL) {
    _jvms = jvms;
  }

  JVMState* jvms()            { return _jvms; }
  void  set_jvms(JVMState* x) {        _jvms = x; }

  // True if there is nothing here.
  bool is_clear() {
    return (_jvms == NULL);
  }

  // Make there be nothing here.
  void clear() {
    _jvms = NULL;
  }

  // Make a new, clean node notes.
  static Node_Notes* make(Compile* C) {
    Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
    nn->clear();
    return nn;
  }

  Node_Notes* clone(Compile* C) {
    Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
    (*nn) = (*this);
    return nn;
  }

  // Absorb any information from source.
  bool update_from(Node_Notes* source) {
    bool changed = false;
    if (source != NULL) {
      if (source->jvms() != NULL) {
        set_jvms(source->jvms());
        changed = true;
      }
    }
    return changed;
  }
};

// Inlined accessors for Compile::node_notes that require the preceding class:
inline Node_Notes*
Compile::locate_node_notes(GrowableArray<Node_Notes*>* arr,
                           int idx, bool can_grow) {
  assert(idx >= 0, "oob");
  int block_idx = (idx >> _log2_node_notes_block_size);
  int grow_by = (block_idx - (arr == NULL? 0: arr->length()));
  if (grow_by >= 0) {
    if (!can_grow)  return NULL;
    grow_node_notes(arr, grow_by + 1);
  }
  // (Every element of arr is a sub-array of length _node_notes_block_size.)
  return arr->at(block_idx) + (idx & (_node_notes_block_size-1));
}

inline bool
Compile::set_node_notes_at(int idx, Node_Notes* value) {
  if (value == NULL || value->is_clear())
    return false;  // nothing to write => write nothing
  Node_Notes* loc = locate_node_notes(_node_note_array, idx, true);
  assert(loc != NULL, "");
  return loc->update_from(value);
}


//------------------------------TypeNode---------------------------------------
// Node with a Type constant.
class TypeNode : public Node {
protected:
  virtual uint hash() const;    // Check the type
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
  const Type* const _type;
public:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;   // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };
  TypeNode( const Type *t, uint required ) : Node(required), _type(t) {
    init_class_id(Class_Type);
  }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual const Type *bottom_type() const;
  virtual       uint  ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

#endif // SHARE_VM_OPTO_NODE_HPP