/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/regmask.hpp"
#include "opto/type.hpp"
#include "utilities/copy.hpp"

class RegMask;
// #include "phase.hpp"
class PhaseTransform;
class PhaseGVN;

// Arena we are currently building Nodes in
// NOTE(review): the comment above looks stale — the constant below is a
// sentinel register number, not an arena; confirm against opto/node.hpp.
const uint Node::NotAMachineReg = 0xffff0000;

#ifndef PRODUCT
extern int nodes_created;
#endif
#ifdef __clang__
#pragma clang diagnostic push
#pragma GCC diagnostic ignored "-Wuninitialized"
#endif

#ifdef ASSERT

//-------------------------- construct_node------------------------------------
// Set a breakpoint here to identify where a particular node index is built.
// Assigns this node a fresh _debug_idx (a global, monotonically increasing
// counter) arranged so its low five decimal digits mirror _idx, and triggers
// BREAKPOINT when either index matches the BreakAtNode flag.
void Node::verify_construction() {
  _debug_orig = NULL;
  int old_debug_idx = Compile::debug_idx();
  int new_debug_idx = old_debug_idx+1;
  if (new_debug_idx > 0) {
    // Arrange that the lowest five decimal digits of _debug_idx
    // will repeat those of _idx. In case this is somehow pathological,
    // we continue to assign negative numbers (!) consecutively.
    const int mod = 100000;
    int bump = (int)(_idx - new_debug_idx) % mod;
    if (bump < 0) bump += mod;
    assert(bump >= 0 && bump < mod, "");
    new_debug_idx += bump;
  }
  Compile::set_debug_idx(new_debug_idx);
  set_debug_idx( new_debug_idx );
  assert(Compile::current()->unique() < (INT_MAX - 1), "Node limit exceeded INT_MAX");
  assert(Compile::current()->live_nodes() < Compile::current()->max_node_limit(), "Live Node limit exceeded limit");
  if (BreakAtNode != 0 && (_debug_idx == BreakAtNode || (int)_idx == BreakAtNode)) {
    tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d", _idx, _debug_idx);
    BREAKPOINT;
  }
#if OPTO_DU_ITERATOR_ASSERT
  _last_del = NULL;
  _del_tick = 0;
#endif
  _hash_lock = 0;
}


// #ifdef ASSERT ...

#if OPTO_DU_ITERATOR_ASSERT
// Capture a snapshot of the node's out-edge bookkeeping (_outcnt, _del_tick)
// so later verify() calls can detect unexpected edge insertions/deletions
// made while this iterator is live.
void DUIterator_Common::sample(const Node* node) {
  _vdui = VerifyDUIterators;
  _node = node;
  _outcnt = node->_outcnt;
  _del_tick = node->_del_tick;
  _last = NULL;
}

// Check that the iterator still refers to the same node and that no
// out-edge deletions have happened since the last sample/resync.
void DUIterator_Common::verify(const Node* node, bool at_end_ok) {
  assert(_node == node, "consistent iterator source");
  assert(_del_tick == node->_del_tick, "no unexpected deletions allowed");
}

void DUIterator_Common::verify_resync() {
  // Ensure that the loop body has just deleted the last guy produced.
  const Node* node = _node;
  // Ensure that at least one copy of the last-seen edge was deleted.
  // Note: It is OK to delete multiple copies of the last-seen edge.
  // Unfortunately, we have no way to verify that all the deletions delete
  // that same edge. On this point we must use the Honor System.
  assert(node->_del_tick >= _del_tick+1, "must have deleted an edge");
  assert(node->_last_del == _last, "must have deleted the edge just produced");
  // We liked this deletion, so accept the resulting outcnt and tick.
  _outcnt = node->_outcnt;
  _del_tick = node->_del_tick;
}

// Re-target this iterator to follow 'that' (assignment support).
// _last is deliberately preserved for an already-initialized iterator.
void DUIterator_Common::reset(const DUIterator_Common& that) {
  if (this == &that) return; // ignore assignment to self
  if (!_vdui) {
    // We need to initialize everything, overwriting garbage values.
    _last = that._last;
    _vdui = that._vdui;
  }
  // Note: It is legal (though odd) for an iterator over some node x
  // to be reassigned to iterate over another node y. Some doubly-nested
  // progress loops depend on being able to do this.
  const Node* node = that._node;
  // Re-initialize everything, except _last.
  _node = node;
  _outcnt = node->_outcnt;
  _del_tick = node->_del_tick;
}

// Start iteration: snapshot the node and clear the refresh counter.
void DUIterator::sample(const Node* node) {
  DUIterator_Common::sample(node);      // Initialize the assertion data.
  _refresh_tick = 0;                    // No refreshes have happened, as yet.
}

// Verify that _idx is within bounds (one past the end is OK iff at_end_ok).
void DUIterator::verify(const Node* node, bool at_end_ok) {
  DUIterator_Common::verify(node, at_end_ok);
  assert(_idx < node->_outcnt + (uint)at_end_ok, "idx in range");
}

// Called on each ++: if the index was refreshed mid-loop, clamp it so the
// range assert below still holds.
void DUIterator::verify_increment() {
  if (_refresh_tick & 1) {
    // We have refreshed the index during this loop.
    // Fix up _idx to meet asserts.
    if (_idx > _outcnt) _idx = _outcnt;
  }
  verify(_node, true);
}

void DUIterator::verify_resync() {
  // Note: We do not assert on _outcnt, because insertions are OK here.
  DUIterator_Common::verify_resync();
  // Make sure we are still in sync, possibly with no more out-edges:
  verify(_node, true);
}

// Assignment support: only the freshly-produced result of Node::outs()
// may be assigned into an existing iterator.
void DUIterator::reset(const DUIterator& that) {
  if (this == &that) return; // self assignment is always a no-op
  assert(that._refresh_tick == 0, "assign only the result of Node::outs()");
  assert(that._idx == 0, "assign only the result of Node::outs()");
  assert(_idx == that._idx, "already assigned _idx");
  if (!_vdui) {
    // We need to initialize everything, overwriting garbage values.
    sample(that._node);
  } else {
    DUIterator_Common::reset(that);
    if (_refresh_tick & 1) {
      _refresh_tick++; // Clear the "was refreshed" flag.
    }
    assert(_refresh_tick < 2*100000, "DU iteration must converge quickly");
  }
}

// Re-sample the node's current edge state and mark the iteration as having
// been refreshed (low bit of _refresh_tick).
void DUIterator::refresh() {
  DUIterator_Common::sample(_node);     // Re-fetch assertion data.
  _refresh_tick |= 1;                   // Set the "was refreshed" flag.
}

void DUIterator::verify_finish() {
  // If the loop has killed the node, do not require it to re-run.
  if (_node->_outcnt == 0) _refresh_tick &= ~1;
  // If this assert triggers, it means that a loop used refresh_out_pos
  // to re-synch an iteration index, but the loop did not correctly
  // re-run itself, using a "while (progress)" construct.
  // This iterator enforces the rule that you must keep trying the loop
  // until it "runs clean" without any need for refreshing.
  assert(!(_refresh_tick & 1), "the loop must run once with no refreshing");
}


// Verify the raw out-array pointer (_outp) is within the node's out array;
// "fast" iterators additionally forbid edge insertions during the loop.
void DUIterator_Fast::verify(const Node* node, bool at_end_ok) {
  DUIterator_Common::verify(node, at_end_ok);
  Node** out = node->_out;
  uint cnt = node->_outcnt;
  assert(cnt == _outcnt, "no insertions allowed");
  assert(_outp >= out && _outp <= out + cnt - !at_end_ok, "outp in range");
  // This last check is carefully designed to work for NO_OUT_ARRAY.
}

// Verify that a limit pointer ("imax") still marks the exact end of the
// node's out array.
void DUIterator_Fast::verify_limit() {
  const Node* node = _node;
  verify(node, true);
  assert(_outp == node->_out + node->_outcnt, "limit still correct");
}

void DUIterator_Fast::verify_resync() {
  const Node* node = _node;
  if (_outp == node->_out + _outcnt) {
    // Note that the limit imax, not the pointer i, gets updated with the
    // exact count of deletions. (For the pointer it's always "--i".)
    assert(node->_outcnt+node->_del_tick == _outcnt+_del_tick, "no insertions allowed with deletion(s)");
    // This is a limit pointer, with a name like "imax".
    // Fudge the _last field so that the common assert will be happy.
    _last = (Node*) node->_last_del;
    DUIterator_Common::verify_resync();
  } else {
    assert(node->_outcnt < _outcnt, "no insertions allowed with deletion(s)");
    // A normal internal pointer.
    DUIterator_Common::verify_resync();
    // Make sure we are still in sync, possibly with no more out-edges:
    verify(node, true);
  }
}

// Support for "imax -= n": the caller claims exactly n edges were deleted;
// verify the node's deletion tick agrees before accepting the new limit.
void DUIterator_Fast::verify_relimit(uint n) {
  const Node* node = _node;
  assert((int)n > 0, "use imax -= n only with a positive count");
  // This must be a limit pointer, with a name like "imax".
  assert(_outp == node->_out + node->_outcnt, "apply -= only to a limit (imax)");
  // The reported number of deletions must match what the node saw.
  assert(node->_del_tick == _del_tick + n, "must have deleted n edges");
  // Fudge the _last field so that the common assert will be happy.
  _last = (Node*) node->_last_del;
  DUIterator_Common::verify_resync();
}

void DUIterator_Fast::reset(const DUIterator_Fast& that) {
  assert(_outp == that._outp, "already assigned _outp");
  DUIterator_Common::reset(that);
}

// A "last" iterator walks backward; _outp must always sit on the last edge.
void DUIterator_Last::verify(const Node* node, bool at_end_ok) {
  // at_end_ok means the _outp is allowed to underflow by 1
  _outp += at_end_ok;
  DUIterator_Fast::verify(node, at_end_ok); // check _del_tick, etc.
  _outp -= at_end_ok;
  assert(_outp == (node->_out + node->_outcnt) - 1, "pointer must point to end of nodes");
}

void DUIterator_Last::verify_limit() {
  // Do not require the limit address to be resynched.
  //verify(node, true);
  assert(_outp == _node->_out, "limit still correct");
}

// Record that the loop body deleted num_edges out-edges in this step and
// verify the node's bookkeeping matches.
void DUIterator_Last::verify_step(uint num_edges) {
  assert((int)num_edges > 0, "need non-zero edge count for loop progress");
  _outcnt   -= num_edges;
  _del_tick += num_edges;
  // Make sure we are still in sync, possibly with no more out-edges:
  const Node* node = _node;
  verify(node, true);
  assert(node->_last_del == _last, "must have deleted the edge just produced");
}

#endif //OPTO_DU_ITERATOR_ASSERT


#endif //ASSERT


// This constant used to initialize _out may be any non-null value.
// The value NULL is reserved for the top node only.
#define NO_OUT_ARRAY ((Node**)-1)

// Out-of-line code from node constructors.
// Executed only when extra debug info. is being passed around.
static void init_node_notes(Compile* C, int idx, Node_Notes* nn) {
  C->set_node_notes_at(idx, nn);
}

// Shared initialization code.
// Allocates the _in edge array from the compile's node arena, captures any
// default node notes, initializes the edge counters, and returns the new
// unique node index (used to initialize _idx in the constructors below).
inline int Node::Init(int req) {
  Compile* C = Compile::current();
  int idx = C->next_unique();

  // Allocate memory for the necessary number of edges.
  if (req > 0) {
    // Allocate space for _in array to have double alignment.
    _in = (Node **) ((char *) (C->node_arena()->Amalloc_D(req * sizeof(void*))));
#ifdef ASSERT
    _in[req-1] = this; // magic cookie for assertion check
#endif
  }
  // If there are default notes floating around, capture them:
  Node_Notes* nn = C->default_node_notes();
  if (nn != NULL) init_node_notes(C, idx, nn);

  // Note: At this point, C is dead,
  // and we begin to initialize the new Node.

  _cnt = _max = req;
  _outcnt = _outmax = 0;
  _class_id = Class_Node;
  _flags = 0;
  _out = NO_OUT_ARRAY;
  return idx;
}

//------------------------------Node-------------------------------------------
// Create a Node, with a given number of required edges.
// Init(req) stored a "magic cookie" (the node's own address) in the last _in
// slot; each constructor below asserts it is still there to verify that the
// arg count passed to operator new matched the edge count used here.
Node::Node(uint req)
  : _idx(Init(req))
{
  assert( req < Compile::current()->max_node_limit() - NodeLimitFudgeFactor, "Input limit exceeded" );
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  if (req == 0) {
    assert( _in == (Node**)this, "Must not pass arg count to 'new'" );
    _in = NULL;
  } else {
    assert( _in[req-1] == this, "Must pass arg count to 'new'" );
    Node** to = _in;
    for(uint i = 0; i < req; i++) {
      to[i] = NULL;
    }
  }
}

//------------------------------Node-------------------------------------------
// One-input node; each input is stored and the reciprocal def-use
// (out) edge is added on the input node.
Node::Node(Node *n0)
  : _idx(Init(1))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[0] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1)
  : _idx(Init(2))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[1] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2)
  : _idx(Init(3))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[2] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3)
  : _idx(Init(4))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[3] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3, Node *n4)
  : _idx(Init(5))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[4] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  assert( is_not_dead(n4), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
  _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
           Node *n4, Node *n5)
  : _idx(Init(6))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[5] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  assert( is_not_dead(n4), "can not use dead node");
  assert( is_not_dead(n5), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
  _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
  _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
           Node *n4, Node *n5, Node *n6)
  : _idx(Init(7))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[6] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  assert( is_not_dead(n4), "can not use dead node");
  assert( is_not_dead(n5), "can not use dead node");
  assert( is_not_dead(n6), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
  _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
  _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
  _in[6] = n6; if (n6 != NULL) n6->add_out((Node *)this);
}

#ifdef __clang__
#pragma clang diagnostic pop
#endif


//------------------------------clone------------------------------------------
// Clone a Node.  Bit-copies the node (header plus its _in array, which lives
// immediately after the object in the new allocation), then fixes up the
// fields that must not be shared with the original.
Node *Node::clone() const {
  Compile* C = Compile::current();
  uint s = size_of();           // Size of inherited Node
  Node *n = (Node*)C->node_arena()->Amalloc_D(size_of() + _max*sizeof(Node*));
  Copy::conjoint_words_to_lower((HeapWord*)this, (HeapWord*)n, s);
  // Set the new input pointer array
  n->_in = (Node**)(((char*)n)+s);
  // Cannot share the old output pointer array, so kill it
  n->_out = NO_OUT_ARRAY;
  // And reset the counters to 0
  n->_outcnt = 0;
  n->_outmax = 0;
  // Unlock this guy, since he is not in any hash table.
  debug_only(n->_hash_lock = 0);
  // Walk the old node's input list to duplicate its edges
  uint i;
  for( i = 0; i < len(); i++ ) {
    Node *x = in(i);
    n->_in[i] = x;
    if (x != NULL) x->add_out(n);
  }
  if (is_macro())
    C->add_macro_node(n);
  if (is_expensive())
    C->add_expensive_node(n);

  n->set_idx(C->next_unique()); // Get new unique index as well
  debug_only( n->verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Do not patch over the debug_idx of a clone, because it makes it
  // impossible to break on the clone's moment of creation.
  //debug_only( n->set_debug_idx( debug_idx() ) );

  C->copy_node_notes_to(n, (Node*) this);

  // MachNode clone
  uint nopnds;
  if (this->is_Mach() && (nopnds = this->as_Mach()->num_opnds()) > 0) {
    MachNode *mach = n->as_Mach();
    MachNode *mthis = this->as_Mach();
    // Get address of _opnd_array.
    // It should be the same offset since it is the clone of this node.
    MachOper **from = mthis->_opnds;
    MachOper **to = (MachOper **)((size_t)(&mach->_opnds) +
                    pointer_delta((const void*)from,
                                  (const void*)(&mthis->_opnds), 1));
    mach->_opnds = to;
    // Deep-copy each machine operand; the bit-copy above only copied pointers.
    for ( uint i = 0; i < nopnds; ++i ) {
      to[i] = from[i]->clone();
    }
  }
  // cloning CallNode may need to clone JVMState
  if (n->is_Call()) {
    n->as_Call()->clone_jvms(C);
  }
  if (n->is_SafePoint()) {
    n->as_SafePoint()->clone_replaced_nodes();
  }
  return n;                     // Return the clone
}

//---------------------------setup_is_top--------------------------------------
// Call this when changing the top node, to reassert the invariants
// required by Node::is_top.  See Compile::set_cached_top_node.
void Node::setup_is_top() {
  if (this == (Node*)Compile::current()->top()) {
    // This node has just become top.  Kill its out array.
    _outcnt = _outmax = 0;
    _out = NULL;                // marker value for top
    assert(is_top(), "must be top");
  } else {
    if (_out == NULL) _out = NO_OUT_ARRAY;
    assert(!is_top(), "must not be top");
  }
}


//------------------------------~Node------------------------------------------
// Fancy destructor; eagerly attempt to reclaim Node numberings and storage.
// The arena allocator cannot free arbitrary blocks, but memory sitting at the
// arena's high-water mark (hwm) can be reclaimed; the hwm comparisons below
// only track statistics for that case under ASSERT.
extern int reclaim_idx ;
extern int reclaim_in  ;
extern int reclaim_node;
void Node::destruct() {
  // Eagerly reclaim unique Node numberings
  Compile* compile = Compile::current();
  if ((uint)_idx+1 == compile->unique()) {
    compile->set_unique(compile->unique()-1);
#ifdef ASSERT
    reclaim_idx++;
#endif
  }
  // Clear debug info:
  Node_Notes* nn = compile->node_notes_at(_idx);
  if (nn != NULL) nn->clear();
  // Walk the input array, freeing the corresponding output edges
  _cnt = _max;  // forget req/prec distinction
  uint i;
  for( i = 0; i < _max; i++ ) {
    set_req(i, NULL);
    //assert(def->out(def->outcnt()-1) == (Node *)this,"bad def-use hacking in reclaim");
  }
  assert(outcnt() == 0, "deleting a node must not leave a dangling use");
  // See if the input array was allocated just prior to the object
  int edge_size = _max*sizeof(void*);
  int out_edge_size = _outmax*sizeof(void*);
  char *edge_end = ((char*)_in) + edge_size;
  char *out_array = (char*)(_out == NO_OUT_ARRAY? NULL: _out);
  char *out_edge_end = out_array + out_edge_size;
  int node_size = size_of();

  // Free the output edge array
  if (out_edge_size > 0) {
#ifdef ASSERT
    if( out_edge_end == compile->node_arena()->hwm() )
      reclaim_in += out_edge_size;  // count reclaimed out edges with in edges
#endif
    compile->node_arena()->Afree(out_array, out_edge_size);
  }

  // Free the input edge array and the node itself
  if( edge_end == (char*)this ) {
    // The _in array sits immediately before the node object, so both can
    // be handled together.  Note: under ASSERT the storage is NOT freed,
    // because the node is smashed to a poisoned state below instead.
#ifdef ASSERT
    if( edge_end+node_size == compile->node_arena()->hwm() ) {
      reclaim_in  += edge_size;
      reclaim_node+= node_size;
    }
#else
    // It was; free the input array and object all in one hit
    compile->node_arena()->Afree(_in,edge_size+node_size);
#endif
  } else {

    // Free just the input array
#ifdef ASSERT
    if( edge_end == compile->node_arena()->hwm() )
      reclaim_in += edge_size;
#endif
    compile->node_arena()->Afree(_in,edge_size);

    // Free just the object
#ifdef ASSERT
    if( ((char*)this) + node_size == compile->node_arena()->hwm() )
      reclaim_node+= node_size;
#else
    compile->node_arena()->Afree(this,node_size);
#endif
  }
  if (is_macro()) {
    compile->remove_macro_node(this);
  }
  if (is_expensive()) {
    compile->remove_expensive_node(this);
  }
  if (is_SafePoint()) {
    as_SafePoint()->delete_replaced_nodes();
  }
#ifdef ASSERT
  // We will not actually delete the storage, but we'll make the node unusable.
  *(address*)this = badAddress;  // smash the C++ vtbl, probably
  _in = _out = (Node**) badAddress;
  _max = _cnt = _outmax = _outcnt = 0;
  compile->remove_modified_node(this);
#endif
}

//------------------------------grow-------------------------------------------
// Grow the input array, making space for more edges.  Grows to the next
// power of two greater than 'len'; new slots are NULL-initialized.
void Node::grow( uint len ) {
  Arena* arena = Compile::current()->node_arena();
  uint new_max = _max;
  if( new_max == 0 ) {
    _max = 4;
    _in = (Node**)arena->Amalloc(4*sizeof(Node*));
    Node** to = _in;
    to[0] = NULL;
    to[1] = NULL;
    to[2] = NULL;
    to[3] = NULL;
    return;
  }
  while( new_max <= len ) new_max <<= 1; // Find next power-of-2
  // Trimming to limit allows a uint8 to handle up to 255 edges.
  // Previously I was using only powers-of-2 which peaked at 128 edges.
  //if( new_max >= limit ) new_max = limit-1;
  _in = (Node**)arena->Arealloc(_in, _max*sizeof(Node*), new_max*sizeof(Node*));
  Copy::zero_to_bytes(&_in[_max], (new_max-_max)*sizeof(Node*)); // NULL all new space
  _max = new_max;               // Record new max length
  // This assertion makes sure that Node::_max is wide enough to
  // represent the numerical value of new_max.
  assert(_max == new_max && _max > len, "int width of _max is too small");
}

//-----------------------------out_grow----------------------------------------
// Grow the output array, making space for more edges.  Unlike grow(), the
// new slots are NOT zeroed; _outcnt delimits the valid entries.
void Node::out_grow( uint len ) {
  assert(!is_top(), "cannot grow a top node's out array");
  Arena* arena = Compile::current()->node_arena();
  uint new_max = _outmax;
  if( new_max == 0 ) {
    _outmax = 4;
    _out = (Node **)arena->Amalloc(4*sizeof(Node*));
    return;
  }
  while( new_max <= len ) new_max <<= 1; // Find next power-of-2
  // Trimming to limit allows a uint8 to handle up to 255 edges.
  // Previously I was using only powers-of-2 which peaked at 128 edges.
  //if( new_max >= limit ) new_max = limit-1;
  assert(_out != NULL && _out != NO_OUT_ARRAY, "out must have sensible value");
  _out = (Node**)arena->Arealloc(_out,_outmax*sizeof(Node*),new_max*sizeof(Node*));
  //Copy::zero_to_bytes(&_out[_outmax], (new_max-_outmax)*sizeof(Node*)); // NULL all new space
  _outmax = new_max;            // Record new max length
  // This assertion makes sure that Node::_max is wide enough to
  // represent the numerical value of new_max.
  assert(_outmax == new_max && _outmax > len, "int width of _outmax is too small");
}

#ifdef ASSERT
//------------------------------is_dead----------------------------------------
// A node is considered dead when all of its inputs are NULL; dumps the node
// (debug output) before reporting it dead.
bool Node::is_dead() const {
  // Mach and pinch point nodes may look like dead.
  if( is_top() || is_Mach() || (Opcode() == Op_Node && _outcnt > 0) )
    return false;
  for( uint i = 0; i < _max; i++ )
    if( _in[i] != NULL )
      return false;
  dump();
  return true;
}
#endif


//------------------------------is_unreachable---------------------------------
// True if this node has no uses, its type is TOP, or its control input is top.
bool Node::is_unreachable(PhaseIterGVN &igvn) const {
  assert(!is_Mach(), "doesn't work with MachNodes");
  return outcnt() == 0 || igvn.type(this) == Type::TOP || in(0)->is_top();
}

//------------------------------add_req----------------------------------------
// Add a new required input at the end of the required inputs, displacing the
// first precedence edge (if any) to the slot past the last precedence edge.
void Node::add_req( Node *n ) {
  assert( is_not_dead(n), "can not use dead node");

  // Look to see if I can move precedence down one without reallocating
  if( (_cnt >= _max) || (in(_max-1) != NULL) )
    grow( _max+1 );

  // Find a precedence edge to move
  if( in(_cnt) != NULL ) {      // Next precedence edge is busy?
718 uint i; 719 for( i=_cnt; i<_max; i++ ) 720 if( in(i) == NULL ) // Find the NULL at end of prec edge list 721 break; // There must be one, since we grew the array 722 _in[i] = in(_cnt); // Move prec over, making space for req edge 723 } 724 _in[_cnt++] = n; // Stuff over old prec edge 725 if (n != NULL) n->add_out((Node *)this); 726 } 727 728 //---------------------------add_req_batch------------------------------------- 729 // Add a new required input at the end 730 void Node::add_req_batch( Node *n, uint m ) { 731 assert( is_not_dead(n), "can not use dead node"); 732 // check various edge cases 733 if ((int)m <= 1) { 734 assert((int)m >= 0, "oob"); 735 if (m != 0) add_req(n); 736 return; 737 } 738 739 // Look to see if I can move precedence down one without reallocating 740 if( (_cnt+m) > _max || _in[_max-m] ) 741 grow( _max+m ); 742 743 // Find a precedence edge to move 744 if( _in[_cnt] != NULL ) { // Next precedence edge is busy? 745 uint i; 746 for( i=_cnt; i<_max; i++ ) 747 if( _in[i] == NULL ) // Find the NULL at end of prec edge list 748 break; // There must be one, since we grew the array 749 // Slide all the precs over by m positions (assume #prec << m). 750 Copy::conjoint_words_to_higher((HeapWord*)&_in[_cnt], (HeapWord*)&_in[_cnt+m], ((i-_cnt)*sizeof(Node*))); 751 } 752 753 // Stuff over the old prec edges 754 for(uint i=0; i<m; i++ ) { 755 _in[_cnt++] = n; 756 } 757 758 // Insert multiple out edges on the node. 
759 if (n != NULL && !n->is_top()) { 760 for(uint i=0; i<m; i++ ) { 761 n->add_out((Node *)this); 762 } 763 } 764 } 765 766 //------------------------------del_req---------------------------------------- 767 // Delete the required edge and compact the edge array 768 void Node::del_req( uint idx ) { 769 assert( idx < _cnt, "oob"); 770 assert( !VerifyHashTableKeys || _hash_lock == 0, 771 "remove node from hash table before modifying it"); 772 // First remove corresponding def-use edge 773 Node *n = in(idx); 774 if (n != NULL) n->del_out((Node *)this); 775 _in[idx] = in(--_cnt); // Compact the array 776 _in[_cnt] = NULL; // NULL out emptied slot 777 Compile::current()->record_modified_node(this); 778 } 779 780 //------------------------------del_req_ordered-------------------------------- 781 // Delete the required edge and compact the edge array with preserved order 782 void Node::del_req_ordered( uint idx ) { 783 assert( idx < _cnt, "oob"); 784 assert( !VerifyHashTableKeys || _hash_lock == 0, 785 "remove node from hash table before modifying it"); 786 // First remove corresponding def-use edge 787 Node *n = in(idx); 788 if (n != NULL) n->del_out((Node *)this); 789 if (idx < _cnt - 1) { // Not last edge ? 
790 Copy::conjoint_words_to_lower((HeapWord*)&_in[idx+1], (HeapWord*)&_in[idx], ((_cnt-idx-1)*sizeof(Node*))); 791 } 792 _in[--_cnt] = NULL; // NULL out emptied slot 793 Compile::current()->record_modified_node(this); 794 } 795 796 //------------------------------ins_req---------------------------------------- 797 // Insert a new required input at the end 798 void Node::ins_req( uint idx, Node *n ) { 799 assert( is_not_dead(n), "can not use dead node"); 800 add_req(NULL); // Make space 801 assert( idx < _max, "Must have allocated enough space"); 802 // Slide over 803 if(_cnt-idx-1 > 0) { 804 Copy::conjoint_words_to_higher((HeapWord*)&_in[idx], (HeapWord*)&_in[idx+1], ((_cnt-idx-1)*sizeof(Node*))); 805 } 806 _in[idx] = n; // Stuff over old required edge 807 if (n != NULL) n->add_out((Node *)this); // Add reciprocal def-use edge 808 } 809 810 //-----------------------------find_edge--------------------------------------- 811 int Node::find_edge(Node* n) { 812 for (uint i = 0; i < len(); i++) { 813 if (_in[i] == n) return i; 814 } 815 return -1; 816 } 817 818 //----------------------------replace_edge------------------------------------- 819 int Node::replace_edge(Node* old, Node* neww) { 820 if (old == neww) return 0; // nothing to do 821 uint nrep = 0; 822 for (uint i = 0; i < len(); i++) { 823 if (in(i) == old) { 824 if (i < req()) 825 set_req(i, neww); 826 else 827 set_prec(i, neww); 828 nrep++; 829 } 830 } 831 return nrep; 832 } 833 834 /** 835 * Replace input edges in the range pointing to 'old' node. 836 */ 837 int Node::replace_edges_in_range(Node* old, Node* neww, int start, int end) { 838 if (old == neww) return 0; // nothing to do 839 uint nrep = 0; 840 for (int i = start; i < end; i++) { 841 if (in(i) == old) { 842 set_req(i, neww); 843 nrep++; 844 } 845 } 846 return nrep; 847 } 848 849 //-------------------------disconnect_inputs----------------------------------- 850 // NULL out all inputs to eliminate incoming Def-Use edges. 
// Return the number of edges between 'n' and 'this'
int Node::disconnect_inputs(Node *n, Compile* C) {
  int edges_to_n = 0;

  // Clear all required inputs, counting how many of them were 'n'.
  uint cnt = req();
  for( uint i = 0; i < cnt; ++i ) {
    if( in(i) == 0 ) continue;
    if( in(i) == n ) ++edges_to_n;
    set_req(i, NULL);
  }
  // Remove precedence edges if any exist
  // Note: Safepoints may have precedence edges, even during parsing
  if( (req() != len()) && (in(req()) != NULL) ) {
    uint max = len();
    for( uint i = 0; i < max; ++i ) {
      if( in(i) == 0 ) continue;
      if( in(i) == n ) ++edges_to_n;
      set_prec(i, NULL);
    }
  }

  // Node::destruct requires all out edges be deleted first
  // debug_only(destruct();)   // no reuse benefit expected
  // If no edges to 'n' were found, this node is fully disconnected: record it dead.
  if (edges_to_n == 0) {
    C->record_dead_node(_idx);
  }
  return edges_to_n;
}

//-----------------------------uncast---------------------------------------
// %%% Temporary, until we sort out CheckCastPP vs. CastPP.
// Strip away casting.  (It is depth-limited.)
Node* Node::uncast() const {
  // Should be inline:
  //return is_ConstraintCast() ? uncast_helper(this) : (Node*) this;
  if (is_ConstraintCast() || is_CheckCastPP())
    return uncast_helper(this);
  else
    return (Node*) this;
}

// Find out of current node that matches opcode.
// Returns the first use whose Opcode() equals 'opcode', or NULL if none.
Node* Node::find_out_with(int opcode) {
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    Node* use = fast_out(i);
    if (use->Opcode() == opcode) {
      return use;
    }
  }
  return NULL;
}

// Return true if the current node has an out that matches opcode.
bool Node::has_out_with(int opcode) {
  return (find_out_with(opcode) != NULL);
}

// Return true if the current node has an out that matches any of the opcodes.
bool Node::has_out_with(int opcode1, int opcode2, int opcode3, int opcode4) {
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    int opcode = fast_out(i)->Opcode();
    if (opcode == opcode1 || opcode == opcode2 || opcode == opcode3 || opcode == opcode4) {
      return true;
    }
  }
  return false;
}


//---------------------------uncast_helper-------------------------------------
// Walk up through ConstraintCast/CheckCastPP nodes (each has a single data
// input at in(1)) and return the first non-cast node.  Depth-limited to K
// steps in debug builds to catch cycles.
Node* Node::uncast_helper(const Node* p) {
#ifdef ASSERT
  uint depth_count = 0;
  const Node* orig_p = p;
#endif

  while (true) {
#ifdef ASSERT
    if (depth_count >= K) {
      // About to hit the depth limit: dump context to aid debugging.
      orig_p->dump(4);
      if (p != orig_p)
        p->dump(1);
    }
    assert(depth_count++ < K, "infinite loop in Node::uncast_helper");
#endif
    if (p == NULL || p->req() != 2) {
      break;
    } else if (p->is_ConstraintCast()) {
      p = p->in(1);
    } else if (p->is_CheckCastPP()) {
      p = p->in(1);
    } else {
      break;
    }
  }
  return (Node*) p;
}

//------------------------------add_prec---------------------------------------
// Add a new precedence input.  Precedence inputs are unordered, with
// duplicates removed and NULLs packed down at the end.
void Node::add_prec( Node *n ) {
  assert( is_not_dead(n), "can not use dead node");

  // Check for NULL at end: grow if the prec area is full.
  if( _cnt >= _max || in(_max-1) )
    grow( _max+1 );

  // Find a precedence edge to move: first NULL slot at or after _cnt.
  uint i = _cnt;
  while( in(i) != NULL ) i++;
  _in[i] = n;                                 // Stuff prec edge over NULL
  if ( n != NULL) n->add_out((Node *)this);   // Add mirror edge
}

//------------------------------rm_prec----------------------------------------
// Remove a precedence input.  Precedence inputs are unordered, with
// duplicates removed and NULLs packed down at the end.
// Removes precedence slot 'j' by moving the last non-NULL precedence edge
// into it, keeping NULLs packed at the end of the prec area.
// NOTE(review): no bounds/assert check that 'j' is actually a precedence
// slot (j >= _cnt) — callers are presumed to guarantee this; confirm.
void Node::rm_prec( uint j ) {

  // Find end of precedence list to pack NULLs
  uint i;
  for( i=j; i<_max; i++ )
    if( !_in[i] )               // Find the NULL at end of prec edge list
      break;
  if (_in[j] != NULL) _in[j]->del_out((Node *)this);
  _in[j] = _in[--i];            // Move last element over removed guy
  _in[i] = NULL;                // NULL out last element
}

//------------------------------size_of----------------------------------------
// Size in bytes of this node object; subclasses override via their own sizeof.
uint Node::size_of() const { return sizeof(*this); }

//------------------------------ideal_reg--------------------------------------
// Default: no ideal register class.
uint Node::ideal_reg() const { return 0; }

//------------------------------jvms-------------------------------------------
// Default: plain nodes carry no JVM state.
JVMState* Node::jvms() const { return NULL; }

#ifdef ASSERT
//------------------------------jvms-------------------------------------------
// Verify that 'using_jvms' appears somewhere in this node's caller chain.
bool Node::verify_jvms(const JVMState* using_jvms) const {
  for (JVMState* jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
    if (jvms == using_jvms)  return true;
  }
  return false;
}

//------------------------------init_NodeProperty------------------------------
// Sanity-check that class and flag enums still fit in their jushort fields.
void Node::init_NodeProperty() {
  assert(_max_classes <= max_jushort, "too many NodeProperty classes");
  assert(_max_flags <= max_jushort, "too many NodeProperty flags");
}
#endif

//------------------------------format-----------------------------------------
// Print as assembly (no-op for plain Nodes; MachNodes override).
void Node::format( PhaseRegAlloc *, outputStream *st ) const {}
//------------------------------emit-------------------------------------------
// Emit bytes starting at parameter 'ptr'.  (No-op for plain Nodes.)
void Node::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {}
//------------------------------size-------------------------------------------
// Size of instruction in bytes (0 for non-machine nodes).
uint Node::size(PhaseRegAlloc *ra_) const { return 0; }

//------------------------------CFG Construction-------------------------------
// Nodes that end basic blocks, e.g. IfTrue/IfFalse, JumpProjNode, Root,
// Goto and Return.
const Node *Node::is_block_proj() const { return 0; }

// Minimum guaranteed type
const Type *Node::bottom_type() const { return Type::BOTTOM; }


//------------------------------raise_bottom_type------------------------------
// Get the worst-case Type output for this Node.
// Only Type and Load nodes store a type; for those, install 'new_type',
// optionally checking (under VerifyAliases) that it refines the old type.
void Node::raise_bottom_type(const Type* new_type) {
  if (is_Type()) {
    TypeNode *n = this->as_Type();
    if (VerifyAliases) {
      assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
    }
    n->set_type(new_type);
  } else if (is_Load()) {
    LoadNode *n = this->as_Load();
    if (VerifyAliases) {
      assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
    }
    n->set_type(new_type);
  }
}

//------------------------------Identity---------------------------------------
// Return a node that the given node is equivalent to.
Node *Node::Identity( PhaseTransform * ) {
  return this;                  // Default to no identities
}

//------------------------------Value------------------------------------------
// Compute a new Type for a node using the Type of the inputs.
const Type *Node::Value( PhaseTransform * ) const {
  return bottom_type();         // Default to worst-case Type
}

//------------------------------Ideal------------------------------------------
//
// 'Idealize' the graph rooted at this Node.
//
// In order to be efficient and flexible there are some subtle invariants
// these Ideal calls need to hold.  Running with '+VerifyIterativeGVN' checks
// these invariants, although its too slow to have on by default.  If you are
// hacking an Ideal call, be sure to test with +VerifyIterativeGVN!
//
// The Ideal call almost arbitrarily reshapes the graph rooted at the 'this'
// pointer.  If ANY change is made, it must return the root of the reshaped
// graph - even if the root is the same Node.  Example: swapping the inputs
// to an AddINode gives the same answer and same root, but you still have to
// return the 'this' pointer instead of NULL.
//
// You cannot return an OLD Node, except for the 'this' pointer.  Use the
// Identity call to return an old Node; basically if Identity can find
// another Node have the Ideal call make no change and return NULL.
// Example: AddINode::Ideal must check for add of zero; in this case it
// returns NULL instead of doing any graph reshaping.
//
// You cannot modify any old Nodes except for the 'this' pointer.  Due to
// sharing there may be other users of the old Nodes relying on their current
// semantics.  Modifying them will break the other users.
// Example: when reshape "(X+3)+4" into "X+7" you must leave the Node for
// "X+3" unchanged in case it is shared.
//
// If you modify the 'this' pointer's inputs, you should use
// 'set_req'.  If you are making a new Node (either as the new root or
// some new internal piece) you may use 'init_req' to set the initial
// value.  You can make a new Node with either 'new' or 'clone'.  In
// either case, def-use info is correctly maintained.
//
// Example: reshape "(X+3)+4" into "X+7":
//    set_req(1, in(1)->in(1));
//    set_req(2, phase->intcon(7));
//    return this;
// Example: reshape "X*4" into "X<<2"
//    return new LShiftINode(in(1), phase->intcon(2));
//
// You must call 'phase->transform(X)' on any new Nodes X you make, except
// for the returned root node.  Example: reshape "X*31" with "(X<<5)-X".
//    Node *shift=phase->transform(new LShiftINode(in(1),phase->intcon(5)));
//    return new AddINode(shift, in(1));
//
// When making a Node for a constant use 'phase->makecon' or 'phase->intcon'.
// These forms are faster than 'phase->transform(new ConNode())' and Do
// The Right Thing with def-use info.
//
// You cannot bury the 'this' Node inside of a graph reshape.  If the reshaped
// graph uses the 'this' Node it must be the root.  If you want a Node with
// the same Opcode as the 'this' pointer use 'clone'.
//
Node *Node::Ideal(PhaseGVN *phase, bool can_reshape) {
  return NULL;                  // Default to being Ideal already
}

// Some nodes have specific Ideal subgraph transformations only if they are
// unique users of specific nodes. Such nodes should be put on IGVN worklist
// for the transformations to happen.
bool Node::has_special_unique_user() const {
  assert(outcnt() == 1, "match only for unique out");
  Node* n = unique_out();
  int op  = Opcode();
  if (this->is_Store()) {
    // Condition for back-to-back stores folding.
    return n->Opcode() == op && n->in(MemNode::Memory) == this;
  } else if (this->is_Load()) {
    // Condition for removing an unused LoadNode from the MemBarAcquire precedence input
    return n->Opcode() == Op_MemBarAcquire;
  } else if (op == Op_AddL) {
    // Condition for convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y))
    return n->Opcode() == Op_ConvL2I && n->in(1) == this;
  } else if (op == Op_SubI || op == Op_SubL) {
    // Condition for subI(x,subI(y,z)) ==> subI(addI(x,z),y)
    return n->Opcode() == op && n->in(2) == this;
  } else if (is_If() && (n->is_IfFalse() || n->is_IfTrue())) {
    // See IfProjNode::Identity()
    return true;
  }
  return false;
};

//--------------------------find_exact_control---------------------------------
// Skip Proj and CatchProj nodes chains. Check for Null and Top.
// If 'ctrl' is NULL and 'this' is a copy Region, start from the copied input.
Node* Node::find_exact_control(Node* ctrl) {
  if (ctrl == NULL && this->is_Region())
    ctrl = this->as_Region()->is_copy();

  if (ctrl != NULL && ctrl->is_CatchProj()) {
    // For the fall-through CatchProj, step past the Catch as well.
    if (ctrl->as_CatchProj()->_con == CatchProjNode::fall_through_index)
      ctrl = ctrl->in(0);
    if (ctrl != NULL && !ctrl->is_top())
      ctrl = ctrl->in(0);
  }

  if (ctrl != NULL && ctrl->is_Proj())
    ctrl = ctrl->in(0);

  return ctrl;
}

//--------------------------dominates------------------------------------------
// Helper function for MemNode::all_controls_dominate().
// Check if 'this' control node dominates or equal to 'sub' control node.
// We already know that if any path back to Root or Start reaches 'this',
// then all paths do, so this is a simple search for one example,
// not an exhaustive search for a counterexample.
// 'nlist' is caller-supplied scratch space; visited 2-input Regions are
// recorded in it as tagged pointers (low bit set once both inputs tried).
// Returns a conservative 'false' when confused (dead code, cycles).
bool Node::dominates(Node* sub, Node_List &nlist) {
  assert(this->is_CFG(), "expecting control");
  assert(sub != NULL && sub->is_CFG(), "expecting control");

  // detect dead cycle without regions
  int iterations_without_region_limit = DominatorSearchLimit;

  Node* orig_sub = sub;
  Node* dom      = this;
  bool  met_dom  = false;
  nlist.clear();

  // Walk 'sub' backward up the chain to 'dom', watching for regions.
  // After seeing 'dom', continue up to Root or Start.
  // If we hit a region (backward split point), it may be a loop head.
  // Keep going through one of the region's inputs.  If we reach the
  // same region again, go through a different input.  Eventually we
  // will either exit through the loop head, or give up.
  // (If we get confused, break out and return a conservative 'false'.)
  while (sub != NULL) {
    if (sub->is_top())  break;  // Conservative answer for dead code.
    if (sub == dom) {
      if (nlist.size() == 0) {
        // No Region nodes except loops were visited before and the EntryControl
        // path was taken for loops: it did not walk in a cycle.
        return true;
      } else if (met_dom) {
        break;          // already met before: walk in a cycle
      } else {
        // Region nodes were visited. Continue walk up to Start or Root
        // to make sure that it did not walk in a cycle.
        met_dom = true; // first time meet
        iterations_without_region_limit = DominatorSearchLimit; // Reset
      }
    }
    if (sub->is_Start() || sub->is_Root()) {
      // Success if we met 'dom' along a path to Start or Root.
      // We assume there are no alternative paths that avoid 'dom'.
      // (This assumption is up to the caller to ensure!)
      return met_dom;
    }
    Node* up = sub->in(0);
    // Normalize simple pass-through regions and projections:
    up = sub->find_exact_control(up);
    // If sub == up, we found a self-loop.  Try to push past it.
    if (sub == up && sub->is_Loop()) {
      // Take loop entry path on the way up to 'dom'.
      up = sub->in(1); // in(LoopNode::EntryControl);
    } else if (sub == up && sub->is_Region() && sub->req() != 3) {
      // Always take in(1) path on the way up to 'dom' for clone regions
      // (with only one input) or regions which merge > 2 paths
      // (usually used to merge fast/slow paths).
      up = sub->in(1);
    } else if (sub == up && sub->is_Region()) {
      // Try both paths for Regions with 2 input paths (it may be a loop head).
      // It could give conservative 'false' answer without information
      // which region's input is the entry path.
      iterations_without_region_limit = DominatorSearchLimit; // Reset

      bool region_was_visited_before = false;
      // Was this Region node visited before?
      // If so, we have reached it because we accidentally took a
      // loop-back edge from 'sub' back into the body of the loop,
      // and worked our way up again to the loop header 'sub'.
      // So, take the first unexplored path on the way up to 'dom'.
      for (int j = nlist.size() - 1; j >= 0; j--) {
        intptr_t ni = (intptr_t)nlist.at(j);
        Node* visited = (Node*)(ni & ~1);   // strip the "visited twice" tag bit
        bool  visited_twice_already = ((ni & 1) != 0);
        if (visited == sub) {
          if (visited_twice_already) {
            // Visited 2 paths, but still stuck in loop body.  Give up.
            return false;
          }
          // The Region node was visited before only once.
          // (We will repush with the low bit set, below.)
          nlist.remove(j);
          // We will find a new edge and re-insert.
          region_was_visited_before = true;
          break;
        }
      }

      // Find an incoming edge which has not been seen yet; walk through it.
      assert(up == sub, "");
      uint skip = region_was_visited_before ? 1 : 0;
      for (uint i = 1; i < sub->req(); i++) {
        Node* in = sub->in(i);
        if (in != NULL && !in->is_top() && in != sub) {
          if (skip == 0) {
            up = in;
            break;
          }
          --skip; // skip this nontrivial input
        }
      }

      // Set 0 bit to indicate that both paths were taken.
      nlist.push((Node*)((intptr_t)sub + (region_was_visited_before ? 1 : 0)));
    }

    if (up == sub) {
      break;    // some kind of tight cycle
    }
    if (up == orig_sub && met_dom) {
      // returned back after visiting 'dom'
      break;    // some kind of cycle
    }
    if (--iterations_without_region_limit < 0) {
      break;    // dead cycle
    }
    sub = up;
  }

  // Did not meet Root or Start node in pred. chain.
  // Conservative answer for dead code.
  return false;
}

//------------------------------remove_dead_region-----------------------------
// This control node is dead.  Follow the subgraph below it making everything
// using it dead as well.  This will happen normally via the usual IterGVN
// worklist but this call is more efficient.  Do not update use-def info
// inside the dead region, just at the borders.
// Worklist algorithm: a node stays on 'nstack' until all its uses have been
// cut over to 'top'; only then is it unhooked from the graph itself.
static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) {
  // Con's are a popular node to re-hit in the hash table again.
  if( dead->is_Con() ) return;

  // Can't put ResourceMark here since igvn->_worklist uses the same arena
  // for verify pass with +VerifyOpto and we add/remove elements in it here.
  Node_List nstack(Thread::current()->resource_area());

  Node *top = igvn->C->top();
  nstack.push(dead);
  bool has_irreducible_loop = igvn->C->has_irreducible_loop();

  while (nstack.size() > 0) {
    dead = nstack.pop();
    if (dead->outcnt() > 0) {
      // Keep dead node on stack until all uses are processed.
      nstack.push(dead);
      // For all Users of the Dead...    ;-)
      for (DUIterator_Last kmin, k = dead->last_outs(kmin); k >= kmin; ) {
        Node* use = dead->last_out(k);
        igvn->hash_delete(use);       // Yank from hash table prior to mod
        if (use->in(0) == dead) {     // Found another dead node
          assert (!use->is_Con(), "Control for Con node should be Root node.");
          use->set_req(0, top);       // Cut dead edge to prevent processing
          nstack.push(use);           // the dead node again.
        } else if (!has_irreducible_loop && // Backedge could be alive in irreducible loop
                   use->is_Loop() && !use->is_Root() &&       // Don't kill Root (RootNode extends LoopNode)
                   use->in(LoopNode::EntryControl) == dead) { // Dead loop if its entry is dead
          use->set_req(LoopNode::EntryControl, top); // Cut dead edge to prevent processing
          use->set_req(0, top);       // Cut self edge
          nstack.push(use);
        } else {                      // Else found a not-dead user
          // Dead if all inputs are top or null
          bool dead_use = !use->is_Root(); // Keep empty graph alive
          for (uint j = 1; j < use->req(); j++) {
            Node* in = use->in(j);
            if (in == dead) {         // Turn all dead inputs into TOP
              use->set_req(j, top);
            } else if (in != NULL && !in->is_top()) {
              dead_use = false;
            }
          }
          if (dead_use) {
            if (use->is_Region()) {
              use->set_req(0, top);   // Cut self edge
            }
            nstack.push(use);
          } else {
            igvn->_worklist.push(use);
          }
        }
        // Refresh the iterator, since any number of kills might have happened.
        k = dead->last_outs(kmin);
      }
    } else { // (dead->outcnt() == 0)
      // Done with outputs.  Unhook the dead node from GVN bookkeeping
      // and from its own inputs.
      igvn->hash_delete(dead);
      igvn->_worklist.remove(dead);
      igvn->C->remove_modified_node(dead);
      igvn->set_type(dead, Type::TOP);
      if (dead->is_macro()) {
        igvn->C->remove_macro_node(dead);
      }
      if (dead->is_expensive()) {
        igvn->C->remove_expensive_node(dead);
      }
      igvn->C->record_dead_node(dead->_idx);
      // Kill all inputs to the dead guy
      for (uint i=0; i < dead->req(); i++) {
        Node *n = dead->in(i);            // Get input to dead guy
        if (n != NULL && !n->is_top()) {  // Input is valid?
          dead->set_req(i, top);          // Smash input away
          if (n->outcnt() == 0) {         // Input also goes dead?
            if (!n->is_Con())
              nstack.push(n);             // Clear it out as well
          } else if (n->outcnt() == 1 &&
                     n->has_special_unique_user()) {
            igvn->add_users_to_worklist( n );
          } else if (n->outcnt() <= 2 && n->is_Store()) {
            // Push store's uses on worklist to enable folding optimization for
            // store/store and store/load to the same address.
            // The restriction (outcnt() <= 2) is the same as in set_req_X()
            // and remove_globally_dead_node().
            igvn->add_users_to_worklist( n );
          }
        }
      }
    } // (dead->outcnt() == 0)
  }   // while (nstack.size() > 0) for outputs
  return;
}

//------------------------------remove_dead_region-----------------------------
// If this node's control input is dead (top), aggressively kill the node
// and everything hanging off it.  (Function continues in the next chunk.)
bool Node::remove_dead_region(PhaseGVN *phase, bool can_reshape) {
  Node *n = in(0);
  if( !n ) return false;
  // Lost control into this guy?  I.e., it became unreachable?
  // Aggressively kill all unreachable code.
  if (can_reshape && n->is_top()) {
    kill_dead_code(this, phase->is_IterGVN());
    return false; // Node is dead.
  } // (head of Node::remove_dead_region is in the previous chunk)

  // Control is a copy Region (single non-null input): skip past it so this
  // node hangs directly off the real control.  Reports a change (true).
  if( n->is_Region() && n->as_Region()->is_copy() ) {
    Node *m = n->nonnull_req();
    set_req(0, m);
    return true;
  }
  return false;
}

//------------------------------hash-------------------------------------------
// Hash function over Nodes.
// Folds the identity (address) of every required input plus the opcode;
// NULL inputs contribute zero, so they need no special casing.
uint Node::hash() const {
  uint sum = 0;
  for( uint i=0; i<_cnt; i++ )        // Add in all inputs
    sum = (sum<<1)-(uintptr_t)in(i);  // Ignore embedded NULLs
  return (sum>>2) + _cnt + Opcode();
}

//------------------------------cmp--------------------------------------------
// Compare special parts of simple Nodes.
// Base nodes carry no extra state: always "equal" (nonzero).
uint Node::cmp( const Node &n ) const {
  return 1;                     // Must be same
}

//------------------------------rematerialize-----------------------------------
// Should we clone rather than spill this instruction?
bool Node::rematerialize() const {
  if ( is_Mach() )
    return this->as_Mach()->rematerialize();
  else
    return (_flags & Flag_rematerialize) != 0;
}

//------------------------------needs_anti_dependence_check---------------------
// Nodes which use memory without consuming it, hence need antidependences.
// Requires at least 2 inputs and the flag; then checks that in(1) is memory.
bool Node::needs_anti_dependence_check() const {
  if( req() < 2 || (_flags & Flag_needs_anti_dependence_check) == 0 )
    return false;
  else
    return in(1)->bottom_type()->has_memory();
}


// Get an integer constant from a ConNode (or CastIINode).
// Return a default value if there is no apparent constant here.
const TypeInt* Node::find_int_type() const {
  if (this->is_Type()) {
    return this->as_Type()->type()->isa_int();
  } else if (this->is_Con()) {
    assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode");
    return this->bottom_type()->isa_int();
  }
  return NULL;
}

// Get a pointer constant from a ConstNode.
1445 // Returns the constant if it is a pointer ConstNode 1446 intptr_t Node::get_ptr() const { 1447 assert( Opcode() == Op_ConP, "" ); 1448 return ((ConPNode*)this)->type()->is_ptr()->get_con(); 1449 } 1450 1451 // Get a narrow oop constant from a ConNNode. 1452 intptr_t Node::get_narrowcon() const { 1453 assert( Opcode() == Op_ConN, "" ); 1454 return ((ConNNode*)this)->type()->is_narrowoop()->get_con(); 1455 } 1456 1457 // Get a long constant from a ConNode. 1458 // Return a default value if there is no apparent constant here. 1459 const TypeLong* Node::find_long_type() const { 1460 if (this->is_Type()) { 1461 return this->as_Type()->type()->isa_long(); 1462 } else if (this->is_Con()) { 1463 assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode"); 1464 return this->bottom_type()->isa_long(); 1465 } 1466 return NULL; 1467 } 1468 1469 1470 /** 1471 * Return a ptr type for nodes which should have it. 1472 */ 1473 const TypePtr* Node::get_ptr_type() const { 1474 const TypePtr* tp = this->bottom_type()->make_ptr(); 1475 #ifdef ASSERT 1476 if (tp == NULL) { 1477 this->dump(1); 1478 assert((tp != NULL), "unexpected node type"); 1479 } 1480 #endif 1481 return tp; 1482 } 1483 1484 // Get a double constant from a ConstNode. 1485 // Returns the constant if it is a double ConstNode 1486 jdouble Node::getd() const { 1487 assert( Opcode() == Op_ConD, "" ); 1488 return ((ConDNode*)this)->type()->is_double_constant()->getd(); 1489 } 1490 1491 // Get a float constant from a ConstNode. 1492 // Returns the constant if it is a float ConstNode 1493 jfloat Node::getf() const { 1494 assert( Opcode() == Op_ConF, "" ); 1495 return ((ConFNode*)this)->type()->is_float_constant()->getf(); 1496 } 1497 1498 #ifndef PRODUCT 1499 1500 //------------------------------find------------------------------------------ 1501 // Find a neighbor of this Node with the given _idx 1502 // If idx is negative, find its absolute value, following both _in and _out. 
// Recursive worker for Node::find/find_ctrl.  Marks visited nodes in the
// VectorSet matching their arena (old vs. new space) to avoid revisits,
// records a match in 'result', and warns if two nodes share the index.
static void find_recur(Compile* C, Node* &result, Node *n, int idx, bool only_ctrl,
                       VectorSet* old_space, VectorSet* new_space ) {
  int node_idx = (idx >= 0) ? idx : -idx;
  if (NotANode(n))  return;  // Gracefully handle NULL, -1, 0xabababab, etc.
  // Contained in new_space or old_space? Check old_arena first since it's mostly empty.
  VectorSet *v = C->old_arena()->contains(n) ? old_space : new_space;
  if( v->test(n->_idx) ) return;
  if( (int)n->_idx == node_idx
      debug_only(|| n->debug_idx() == node_idx) ) {
    if (result != NULL)
      tty->print("find: " INTPTR_FORMAT " and " INTPTR_FORMAT " both have idx==%d\n",
                 (uintptr_t)result, (uintptr_t)n, node_idx);
    result = n;
  }
  v->set(n->_idx);
  for( uint i=0; i<n->len(); i++ ) {
    // In control-only mode, follow only the control edge of non-Region,
    // non-Root nodes.
    if( only_ctrl && !(n->is_Region()) && (n->Opcode() != Op_Root) && (i != TypeFunc::Control) ) continue;
    find_recur(C, result, n->in(i), idx, only_ctrl, old_space, new_space );
  }
  // Search along forward edges also:
  if (idx < 0 && !only_ctrl) {
    for( uint j=0; j<n->outcnt(); j++ ) {
      find_recur(C, result, n->raw_out(j), idx, only_ctrl, old_space, new_space );
    }
  }
#ifdef ASSERT
  // Search along debug_orig edges last, checking for cycles
  Node* orig = n->debug_orig();
  if (orig != NULL) {
    do {
      if (NotANode(orig))  break;
      find_recur(C, result, orig, idx, only_ctrl, old_space, new_space );
      orig = orig->debug_orig();
    } while (orig != NULL && orig != n->debug_orig());
  }
#endif //ASSERT
}

// call this from debugger:
Node* find_node(Node* n, int idx) {
  return n->find(idx);
}

//------------------------------find-------------------------------------------
// Search the neighborhood of this node for a node with the given _idx,
// following both input and output edges.  Returns NULL if not found.
Node* Node::find(int idx) const {
  ResourceArea *area = Thread::current()->resource_area();
  VectorSet old_space(area), new_space(area);
  Node* result = NULL;
  find_recur(Compile::current(), result, (Node*) this, idx, false, &old_space, &new_space );
  return result;
}

//------------------------------find_ctrl--------------------------------------
// Find an ancestor to this node in the control history with given _idx
Node* Node::find_ctrl(int idx) const {
  ResourceArea *area = Thread::current()->resource_area();
  VectorSet old_space(area), new_space(area);
  Node* result = NULL;
  find_recur(Compile::current(), result, (Node*) this, idx, true, &old_space, &new_space );
  return result;
}
#endif



#ifndef PRODUCT

// -----------------------------Name-------------------------------------------
// Human-readable class name, indexed by opcode.
extern const char *NodeClassNames[];
const char *Node::Name() const { return NodeClassNames[Opcode()]; }

// A node is "disconnected" when all of its required inputs are NULL.
static bool is_disconnected(const Node* n) {
  for (uint i = 0; i < n->req(); i++) {
    if (n->in(i) != NULL)  return false;
  }
  return true;
}

#ifdef ASSERT
// Print the debug_orig chain of 'orig', using the tortoise & hare scheme
// to stop on cycles.  Nodes outside the current node arena are prefixed 'o',
// disconnected nodes are bracketed.
static void dump_orig(Node* orig, outputStream *st) {
  Compile* C = Compile::current();
  if (NotANode(orig)) orig = NULL;
  if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
  if (orig == NULL) return;
  st->print(" !orig=");
  Node* fast = orig->debug_orig(); // tortoise & hare algorithm to detect loops
  if (NotANode(fast)) fast = NULL;
  while (orig != NULL) {
    bool discon = is_disconnected(orig);  // if discon, print [123] else 123
    if (discon) st->print("[");
    if (!Compile::current()->node_arena()->contains(orig))
      st->print("o");
    st->print("%d", orig->_idx);
    if (discon) st->print("]");
    orig = orig->debug_orig();
    if (NotANode(orig)) orig = NULL;
    if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
    if (orig != NULL) st->print(",");
    if (fast != NULL) {
      // Step fast twice for each single step of orig:
      fast = fast->debug_orig();
      if (NotANode(fast)) fast
= NULL;  // (completes the statement begun in the previous chunk)
      if (fast != NULL && fast != orig) {
        fast = fast->debug_orig();
        if (NotANode(fast)) fast = NULL;
      }
      if (fast == orig) {
        // Hare caught the tortoise: debug_orig chain is cyclic.
        st->print("...");
        break;
      }
    }
  }
}

// Record the node this one was cloned/derived from, and trip a breakpoint
// if any node on the (depth-limited) orig chain matches BreakAtNode.
void Node::set_debug_orig(Node* orig) {
  _debug_orig = orig;
  if (BreakAtNode == 0)  return;
  if (NotANode(orig))  orig = NULL;
  int trip = 10;   // bound the walk in case the orig chain is long or cyclic
  while (orig != NULL) {
    if (orig->debug_idx() == BreakAtNode || (int)orig->_idx == BreakAtNode) {
      tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d orig._idx=%d orig._debug_idx=%d",
                    this->_idx, this->debug_idx(), orig->_idx, orig->debug_idx());
      BREAKPOINT;
    }
    orig = orig->debug_orig();
    if (NotANode(orig))  orig = NULL;
    if (trip-- <= 0)  break;
  }
}
#endif //ASSERT

//------------------------------dump------------------------------------------
// Dump a Node: index, name, inputs, outputs, node-specific info, and type.
// 'o' prefix marks nodes from the old (pre-Matcher) arena.
void Node::dump(const char* suffix, bool mark, outputStream *st) const {
  Compile* C = Compile::current();
  bool is_new = C->node_arena()->contains(this);
  C->_in_dump_cnt++;
  st->print("%c%d%s\t%s\t=== ", is_new ? ' ' : 'o', _idx, mark ? " >" : "", Name());

  // Dump the required and precedence inputs
  dump_req(st);
  dump_prec(st);
  // Dump the outputs
  dump_out(st);

  if (is_disconnected(this)) {
#ifdef ASSERT
    st->print(" [%d]",debug_idx());
    dump_orig(debug_orig(), st);
#endif
    st->cr();
    C->_in_dump_cnt--;
    return;                     // don't process dead nodes
  }

  if (C->clone_map().value(_idx) != 0) {
    C->clone_map().dump(_idx);
  }
  // Dump node-specific info
  dump_spec(st);
#ifdef ASSERT
  // Dump the non-reset _debug_idx
  if (Verbose && WizardMode) {
    st->print(" [%d]",debug_idx());
  }
#endif

  const Type *t = bottom_type();

  if (t != NULL && (t->isa_instptr() || t->isa_klassptr())) {
    const TypeInstPtr  *toop = t->isa_instptr();
    const TypeKlassPtr *tkls = t->isa_klassptr();
    ciKlass* klass = toop ? toop->klass() : (tkls ? tkls->klass() : NULL );
    if (klass && klass->is_loaded() && klass->is_interface()) {
      st->print(" Interface:");
    } else if (toop) {
      st->print(" Oop:");
    } else if (tkls) {
      st->print(" Klass:");
    }
    t->dump_on(st);
  } else if (t == Type::MEMORY) {
    st->print(" Memory:");
    MemNode::dump_adr_type(this, adr_type(), st);
  } else if (Verbose || WizardMode) {
    st->print(" Type:");
    if (t) {
      t->dump_on(st);
    } else {
      st->print("no type");
    }
  } else if (t->isa_vect() && this->is_MachSpillCopy()) {
    // NOTE(review): this branch dereferences 't' without a NULL check; it is
    // only reached when !Verbose and t is neither MEMORY nor inst/klass ptr.
    // Looks like t==NULL would crash here — confirm bottom_type() is non-NULL.
    // Dump MachSpillcopy vector type.
    t->dump_on(st);
  }
  if (is_new) {
    debug_only(dump_orig(debug_orig(), st));
    Node_Notes* nn = C->node_notes_at(_idx);
    if (nn != NULL && !nn->is_clear()) {
      if (nn->jvms() != NULL) {
        st->print(" !jvms:");
        nn->jvms()->dump_spec(st);
      }
    }
  }
  if (suffix) st->print("%s", suffix);
  C->_in_dump_cnt--;
}

//------------------------------dump_req--------------------------------------
// Print required inputs: '_' for NULL, 'NotANode' for garbage pointers,
// otherwise the input's index ('o'-prefixed if from the old arena).
void Node::dump_req(outputStream *st) const {
  // Dump the required input edges
  for (uint i = 0; i < req(); i++) {    // For all required inputs
    Node* d = in(i);
    if (d == NULL) {
      st->print("_ ");
    } else if (NotANode(d)) {
      st->print("NotANode ");  // uninitialized, sentinel, garbage, etc.
    } else {
      st->print("%c%d ", Compile::current()->node_arena()->contains(d) ? ' ' : 'o', d->_idx);
    }
  }
}


//------------------------------dump_prec-------------------------------------
// Print precedence inputs (if any), separated from required inputs by " |".
void Node::dump_prec(outputStream *st) const {
  // Dump the precedence edges
  int any_prec = 0;
  for (uint i = req(); i < len(); i++) {  // For all precedence inputs
    Node* p = in(i);
    if (p != NULL) {
      if (!any_prec++) st->print(" |");
      if (NotANode(p)) { st->print("NotANode "); continue; }
      st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    }
  }
}

//------------------------------dump_out--------------------------------------
// Print the outputs (uses) of this node, bracketed by "[[" and "]]".
void Node::dump_out(outputStream *st) const {
  // Delimit the output edges
  st->print(" [[");
  // Dump the output edges
  for (uint i = 0; i < _outcnt; i++) {    // For all outputs
    Node* u = _out[i];
    if (u == NULL) {
      st->print("_ ");
    } else if (NotANode(u)) {
      st->print("NotANode ");
    } else {
      st->print("%c%d ", Compile::current()->node_arena()->contains(u) ? ' ' : 'o', u->_idx);
    }
  }
  st->print("]] ");
}

//----------------------------collect_nodes_i----------------------------------
// Collects nodes from an Ideal graph, starting from a given start node and
// moving in a given direction until a certain depth (distance from the start
// node) is reached. Duplicates are ignored.
// Arguments:
//   nstack:        the nodes are collected into this array.
//   start:         the node at which to start collecting.
//   direction:     if this is a positive number, collect input nodes; if it is
//                  a negative number, collect output nodes.
//   depth:         collect nodes up to this distance from the start node.
//   include_start: whether to include the start node in the result collection.
//   only_ctrl:     whether to regard control edges only during traversal.
//   only_data:     whether to regard data edges only during traversal.
// (Definition continues beyond this chunk.)
static void collect_nodes_i(GrowableArray<Node*> *nstack, const Node* start, int direction, uint depth, bool include_start, bool only_ctrl, bool only_data) {
  Node* s = (Node*) start; // remove const
  nstack->append(s);
  int begin = 0;
  int end = 0;
  // Breadth-first: process the frontier [begin, end) appended at each depth.
  for(uint i = 0; i < depth; i++) {
    end = nstack->length();
    for(int j = begin; j < end; j++) {
      Node* tp = nstack->at(j);
      uint limit = direction > 0 ? tp->len() : tp->outcnt();
      for(uint k = 0; k < limit; k++) {
        Node* n = direction > 0 ?
tp->in(k) : tp->raw_out(k); 1786 1787 if (NotANode(n)) continue; 1788 // do not recurse through top or the root (would reach unrelated stuff) 1789 if (n->is_Root() || n->is_top()) continue; 1790 if (only_ctrl && !n->is_CFG()) continue; 1791 if (only_data && n->is_CFG()) continue; 1792 1793 bool on_stack = nstack->contains(n); 1794 if (!on_stack) { 1795 nstack->append(n); 1796 } 1797 } 1798 } 1799 begin = end; 1800 } 1801 if (!include_start) { 1802 nstack->remove(s); 1803 } 1804 } 1805 1806 //------------------------------dump_nodes------------------------------------- 1807 static void dump_nodes(const Node* start, int d, bool only_ctrl) { 1808 if (NotANode(start)) return; 1809 1810 GrowableArray <Node *> nstack(Compile::current()->live_nodes()); 1811 collect_nodes_i(&nstack, start, d, (uint) ABS(d), true, only_ctrl, false); 1812 1813 int end = nstack.length(); 1814 if (d > 0) { 1815 for(int j = end-1; j >= 0; j--) { 1816 nstack.at(j)->dump(); 1817 } 1818 } else { 1819 for(int j = 0; j < end; j++) { 1820 nstack.at(j)->dump(); 1821 } 1822 } 1823 } 1824 1825 //------------------------------dump------------------------------------------- 1826 void Node::dump(int d) const { 1827 dump_nodes(this, d, false); 1828 } 1829 1830 //------------------------------dump_ctrl-------------------------------------- 1831 // Dump a Node's control history to depth 1832 void Node::dump_ctrl(int d) const { 1833 dump_nodes(this, d, true); 1834 } 1835 1836 //-----------------------------dump_compact------------------------------------ 1837 void Node::dump_comp() const { 1838 this->dump_comp("\n"); 1839 } 1840 1841 //-----------------------------dump_compact------------------------------------ 1842 // Dump a Node in compact representation, i.e., just print its name and index. 1843 // Nodes can specify additional specifics to print in compact representation by 1844 // implementing dump_compact_spec. 
1845 void Node::dump_comp(const char* suffix, outputStream *st) const { 1846 Compile* C = Compile::current(); 1847 C->_in_dump_cnt++; 1848 st->print("%s(%d)", Name(), _idx); 1849 this->dump_compact_spec(st); 1850 if (suffix) { 1851 st->print("%s", suffix); 1852 } 1853 C->_in_dump_cnt--; 1854 } 1855 1856 //----------------------------dump_related------------------------------------- 1857 // Dump a Node's related nodes - the notion of "related" depends on the Node at 1858 // hand and is determined by the implementation of the virtual method rel. 1859 void Node::dump_related() const { 1860 Compile* C = Compile::current(); 1861 GrowableArray <Node *> in_rel(C->unique()); 1862 GrowableArray <Node *> out_rel(C->unique()); 1863 this->related(&in_rel, &out_rel, false); 1864 for (int i = in_rel.length() - 1; i >= 0; i--) { 1865 in_rel.at(i)->dump(); 1866 } 1867 this->dump("\n", true); 1868 for (int i = 0; i < out_rel.length(); i++) { 1869 out_rel.at(i)->dump(); 1870 } 1871 } 1872 1873 //----------------------------dump_related------------------------------------- 1874 // Dump a Node's related nodes up to a given depth (distance from the start 1875 // node). 1876 // Arguments: 1877 // d_in: depth for input nodes. 1878 // d_out: depth for output nodes (note: this also is a positive number). 
1879 void Node::dump_related(uint d_in, uint d_out) const { 1880 Compile* C = Compile::current(); 1881 GrowableArray <Node *> in_rel(C->unique()); 1882 GrowableArray <Node *> out_rel(C->unique()); 1883 1884 // call collect_nodes_i directly 1885 collect_nodes_i(&in_rel, this, 1, d_in, false, false, false); 1886 collect_nodes_i(&out_rel, this, -1, d_out, false, false, false); 1887 1888 for (int i = in_rel.length() - 1; i >= 0; i--) { 1889 in_rel.at(i)->dump(); 1890 } 1891 this->dump("\n", true); 1892 for (int i = 0; i < out_rel.length(); i++) { 1893 out_rel.at(i)->dump(); 1894 } 1895 } 1896 1897 //------------------------dump_related_compact--------------------------------- 1898 // Dump a Node's related nodes in compact representation. The notion of 1899 // "related" depends on the Node at hand and is determined by the implementation 1900 // of the virtual method rel. 1901 void Node::dump_related_compact() const { 1902 Compile* C = Compile::current(); 1903 GrowableArray <Node *> in_rel(C->unique()); 1904 GrowableArray <Node *> out_rel(C->unique()); 1905 this->related(&in_rel, &out_rel, true); 1906 int n_in = in_rel.length(); 1907 int n_out = out_rel.length(); 1908 1909 this->dump_comp(n_in == 0 ? "\n" : " "); 1910 for (int i = 0; i < n_in; i++) { 1911 in_rel.at(i)->dump_comp(i == n_in - 1 ? "\n" : " "); 1912 } 1913 for (int i = 0; i < n_out; i++) { 1914 out_rel.at(i)->dump_comp(i == n_out - 1 ? "\n" : " "); 1915 } 1916 } 1917 1918 //------------------------------related---------------------------------------- 1919 // Collect a Node's related nodes. The default behaviour just collects the 1920 // inputs and outputs at depth 1, including both control and data flow edges, 1921 // regardless of whether the presentation is compact or not. For data nodes, 1922 // the default is to collect all data inputs (till level 1 if compact), and 1923 // outputs till level 1. 
1924 void Node::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const { 1925 if (this->is_CFG()) { 1926 collect_nodes_i(in_rel, this, 1, 1, false, false, false); 1927 collect_nodes_i(out_rel, this, -1, 1, false, false, false); 1928 } else { 1929 if (compact) { 1930 this->collect_nodes(in_rel, 1, false, true); 1931 } else { 1932 this->collect_nodes_in_all_data(in_rel, false); 1933 } 1934 this->collect_nodes(out_rel, -1, false, false); 1935 } 1936 } 1937 1938 //---------------------------collect_nodes------------------------------------- 1939 // An entry point to the low-level node collection facility, to start from a 1940 // given node in the graph. The start node is by default not included in the 1941 // result. 1942 // Arguments: 1943 // ns: collect the nodes into this data structure. 1944 // d: the depth (distance from start node) to which nodes should be 1945 // collected. A value >0 indicates input nodes, a value <0, output 1946 // nodes. 1947 // ctrl: include only control nodes. 1948 // data: include only data nodes. 1949 void Node::collect_nodes(GrowableArray<Node*> *ns, int d, bool ctrl, bool data) const { 1950 if (ctrl && data) { 1951 // ignore nonsensical combination 1952 return; 1953 } 1954 collect_nodes_i(ns, this, d, (uint) ABS(d), false, ctrl, data); 1955 } 1956 1957 //--------------------------collect_nodes_in----------------------------------- 1958 static void collect_nodes_in(Node* start, GrowableArray<Node*> *ns, bool primary_is_data, bool collect_secondary) { 1959 // The maximum depth is determined using a BFS that visits all primary (data 1960 // or control) inputs and increments the depth at each level. 
1961 uint d_in = 0; 1962 GrowableArray<Node*> nodes(Compile::current()->unique()); 1963 nodes.push(start); 1964 int nodes_at_current_level = 1; 1965 int n_idx = 0; 1966 while (nodes_at_current_level > 0) { 1967 // Add all primary inputs reachable from the current level to the list, and 1968 // increase the depth if there were any. 1969 int nodes_at_next_level = 0; 1970 bool nodes_added = false; 1971 while (nodes_at_current_level > 0) { 1972 nodes_at_current_level--; 1973 Node* current = nodes.at(n_idx++); 1974 for (uint i = 0; i < current->len(); i++) { 1975 Node* n = current->in(i); 1976 if (NotANode(n)) { 1977 continue; 1978 } 1979 if ((primary_is_data && n->is_CFG()) || (!primary_is_data && !n->is_CFG())) { 1980 continue; 1981 } 1982 if (!nodes.contains(n)) { 1983 nodes.push(n); 1984 nodes_added = true; 1985 nodes_at_next_level++; 1986 } 1987 } 1988 } 1989 if (nodes_added) { 1990 d_in++; 1991 } 1992 nodes_at_current_level = nodes_at_next_level; 1993 } 1994 start->collect_nodes(ns, d_in, !primary_is_data, primary_is_data); 1995 if (collect_secondary) { 1996 // Now, iterate over the secondary nodes in ns and add the respective 1997 // boundary reachable from them. 1998 GrowableArray<Node*> sns(Compile::current()->unique()); 1999 for (GrowableArrayIterator<Node*> it = ns->begin(); it != ns->end(); ++it) { 2000 Node* n = *it; 2001 n->collect_nodes(&sns, 1, primary_is_data, !primary_is_data); 2002 for (GrowableArrayIterator<Node*> d = sns.begin(); d != sns.end(); ++d) { 2003 ns->append_if_missing(*d); 2004 } 2005 sns.clear(); 2006 } 2007 } 2008 } 2009 2010 //---------------------collect_nodes_in_all_data------------------------------- 2011 // Collect the entire data input graph. Include the control boundary if 2012 // requested. 2013 // Arguments: 2014 // ns: collect the nodes into this data structure. 2015 // ctrl: if true, include the control boundary. 
2016 void Node::collect_nodes_in_all_data(GrowableArray<Node*> *ns, bool ctrl) const { 2017 collect_nodes_in((Node*) this, ns, true, ctrl); 2018 } 2019 2020 //--------------------------collect_nodes_in_all_ctrl-------------------------- 2021 // Collect the entire control input graph. Include the data boundary if 2022 // requested. 2023 // ns: collect the nodes into this data structure. 2024 // data: if true, include the control boundary. 2025 void Node::collect_nodes_in_all_ctrl(GrowableArray<Node*> *ns, bool data) const { 2026 collect_nodes_in((Node*) this, ns, false, data); 2027 } 2028 2029 //------------------collect_nodes_out_all_ctrl_boundary------------------------ 2030 // Collect the entire output graph until hitting control node boundaries, and 2031 // include those. 2032 void Node::collect_nodes_out_all_ctrl_boundary(GrowableArray<Node*> *ns) const { 2033 // Perform a BFS and stop at control nodes. 2034 GrowableArray<Node*> nodes(Compile::current()->unique()); 2035 nodes.push((Node*) this); 2036 while (nodes.length() > 0) { 2037 Node* current = nodes.pop(); 2038 if (NotANode(current)) { 2039 continue; 2040 } 2041 ns->append_if_missing(current); 2042 if (!current->is_CFG()) { 2043 for (DUIterator i = current->outs(); current->has_out(i); i++) { 2044 nodes.push(current->out(i)); 2045 } 2046 } 2047 } 2048 ns->remove((Node*) this); 2049 } 2050 2051 // VERIFICATION CODE 2052 // For each input edge to a node (ie - for each Use-Def edge), verify that 2053 // there is a corresponding Def-Use edge. 
2054 //------------------------------verify_edges----------------------------------- 2055 void Node::verify_edges(Unique_Node_List &visited) { 2056 uint i, j, idx; 2057 int cnt; 2058 Node *n; 2059 2060 // Recursive termination test 2061 if (visited.member(this)) return; 2062 visited.push(this); 2063 2064 // Walk over all input edges, checking for correspondence 2065 for( i = 0; i < len(); i++ ) { 2066 n = in(i); 2067 if (n != NULL && !n->is_top()) { 2068 // Count instances of (Node *)this 2069 cnt = 0; 2070 for (idx = 0; idx < n->_outcnt; idx++ ) { 2071 if (n->_out[idx] == (Node *)this) cnt++; 2072 } 2073 assert( cnt > 0,"Failed to find Def-Use edge." ); 2074 // Check for duplicate edges 2075 // walk the input array downcounting the input edges to n 2076 for( j = 0; j < len(); j++ ) { 2077 if( in(j) == n ) cnt--; 2078 } 2079 assert( cnt == 0,"Mismatched edge count."); 2080 } else if (n == NULL) { 2081 assert(i >= req() || i == 0 || is_Region() || is_Phi(), "only regions or phis have null data edges"); 2082 } else { 2083 assert(n->is_top(), "sanity"); 2084 // Nothing to check. 2085 } 2086 } 2087 // Recursive walk over all input edges 2088 for( i = 0; i < len(); i++ ) { 2089 n = in(i); 2090 if( n != NULL ) 2091 in(i)->verify_edges(visited); 2092 } 2093 } 2094 2095 //------------------------------verify_recur----------------------------------- 2096 static const Node *unique_top = NULL; 2097 2098 void Node::verify_recur(const Node *n, int verify_depth, 2099 VectorSet &old_space, VectorSet &new_space) { 2100 if ( verify_depth == 0 ) return; 2101 if (verify_depth > 0) --verify_depth; 2102 2103 Compile* C = Compile::current(); 2104 2105 // Contained in new_space or old_space? 2106 VectorSet *v = C->node_arena()->contains(n) ? &new_space : &old_space; 2107 // Check for visited in the proper space. Numberings are not unique 2108 // across spaces so we need a separate VectorSet for each space. 
2109 if( v->test_set(n->_idx) ) return; 2110 2111 if (n->is_Con() && n->bottom_type() == Type::TOP) { 2112 if (C->cached_top_node() == NULL) 2113 C->set_cached_top_node((Node*)n); 2114 assert(C->cached_top_node() == n, "TOP node must be unique"); 2115 } 2116 2117 for( uint i = 0; i < n->len(); i++ ) { 2118 Node *x = n->in(i); 2119 if (!x || x->is_top()) continue; 2120 2121 // Verify my input has a def-use edge to me 2122 if (true /*VerifyDefUse*/) { 2123 // Count use-def edges from n to x 2124 int cnt = 0; 2125 for( uint j = 0; j < n->len(); j++ ) 2126 if( n->in(j) == x ) 2127 cnt++; 2128 // Count def-use edges from x to n 2129 uint max = x->_outcnt; 2130 for( uint k = 0; k < max; k++ ) 2131 if (x->_out[k] == n) 2132 cnt--; 2133 assert( cnt == 0, "mismatched def-use edge counts" ); 2134 } 2135 2136 verify_recur(x, verify_depth, old_space, new_space); 2137 } 2138 2139 } 2140 2141 //------------------------------verify----------------------------------------- 2142 // Check Def-Use info for my subgraph 2143 void Node::verify() const { 2144 Compile* C = Compile::current(); 2145 Node* old_top = C->cached_top_node(); 2146 ResourceMark rm; 2147 ResourceArea *area = Thread::current()->resource_area(); 2148 VectorSet old_space(area), new_space(area); 2149 verify_recur(this, -1, old_space, new_space); 2150 C->set_cached_top_node(old_top); 2151 } 2152 #endif 2153 2154 2155 //------------------------------walk------------------------------------------- 2156 // Graph walk, with both pre-order and post-order functions 2157 void Node::walk(NFunc pre, NFunc post, void *env) { 2158 VectorSet visited(Thread::current()->resource_area()); // Setup for local walk 2159 walk_(pre, post, env, visited); 2160 } 2161 2162 void Node::walk_(NFunc pre, NFunc post, void *env, VectorSet &visited) { 2163 if( visited.test_set(_idx) ) return; 2164 pre(*this,env); // Call the pre-order walk function 2165 for( uint i=0; i<_max; i++ ) 2166 if( in(i) ) // Input exists and is not walked? 
2167 in(i)->walk_(pre,post,env,visited); // Walk it with pre & post functions 2168 post(*this,env); // Call the post-order walk function 2169 } 2170 2171 void Node::nop(Node &, void*) {} 2172 2173 //------------------------------Registers-------------------------------------- 2174 // Do we Match on this edge index or not? Generally false for Control 2175 // and true for everything else. Weird for calls & returns. 2176 uint Node::match_edge(uint idx) const { 2177 return idx; // True for other than index 0 (control) 2178 } 2179 2180 static RegMask _not_used_at_all; 2181 // Register classes are defined for specific machines 2182 const RegMask &Node::out_RegMask() const { 2183 ShouldNotCallThis(); 2184 return _not_used_at_all; 2185 } 2186 2187 const RegMask &Node::in_RegMask(uint) const { 2188 ShouldNotCallThis(); 2189 return _not_used_at_all; 2190 } 2191 2192 //============================================================================= 2193 //----------------------------------------------------------------------------- 2194 void Node_Array::reset( Arena *new_arena ) { 2195 _a->Afree(_nodes,_max*sizeof(Node*)); 2196 _max = 0; 2197 _nodes = NULL; 2198 _a = new_arena; 2199 } 2200 2201 //------------------------------clear------------------------------------------ 2202 // Clear all entries in _nodes to NULL but keep storage 2203 void Node_Array::clear() { 2204 Copy::zero_to_bytes( _nodes, _max*sizeof(Node*) ); 2205 } 2206 2207 //----------------------------------------------------------------------------- 2208 void Node_Array::grow( uint i ) { 2209 if( !_max ) { 2210 _max = 1; 2211 _nodes = (Node**)_a->Amalloc( _max * sizeof(Node*) ); 2212 _nodes[0] = NULL; 2213 } 2214 uint old = _max; 2215 while( i >= _max ) _max <<= 1; // Double to fit 2216 _nodes = (Node**)_a->Arealloc( _nodes, old*sizeof(Node*),_max*sizeof(Node*)); 2217 Copy::zero_to_bytes( &_nodes[old], (_max-old)*sizeof(Node*) ); 2218 } 2219 2220 
//----------------------------------------------------------------------------- 2221 void Node_Array::insert( uint i, Node *n ) { 2222 if( _nodes[_max-1] ) grow(_max); // Get more space if full 2223 Copy::conjoint_words_to_higher((HeapWord*)&_nodes[i], (HeapWord*)&_nodes[i+1], ((_max-i-1)*sizeof(Node*))); 2224 _nodes[i] = n; 2225 } 2226 2227 //----------------------------------------------------------------------------- 2228 void Node_Array::remove( uint i ) { 2229 Copy::conjoint_words_to_lower((HeapWord*)&_nodes[i+1], (HeapWord*)&_nodes[i], ((_max-i-1)*sizeof(Node*))); 2230 _nodes[_max-1] = NULL; 2231 } 2232 2233 //----------------------------------------------------------------------------- 2234 void Node_Array::sort( C_sort_func_t func) { 2235 qsort( _nodes, _max, sizeof( Node* ), func ); 2236 } 2237 2238 //----------------------------------------------------------------------------- 2239 void Node_Array::dump() const { 2240 #ifndef PRODUCT 2241 for( uint i = 0; i < _max; i++ ) { 2242 Node *nn = _nodes[i]; 2243 if( nn != NULL ) { 2244 tty->print("%5d--> ",i); nn->dump(); 2245 } 2246 } 2247 #endif 2248 } 2249 2250 //--------------------------is_iteratively_computed------------------------------ 2251 // Operation appears to be iteratively computed (such as an induction variable) 2252 // It is possible for this operation to return false for a loop-varying 2253 // value, if it appears (by local graph inspection) to be computed by a simple conditional. 2254 bool Node::is_iteratively_computed() { 2255 if (ideal_reg()) { // does operation have a result register? 
2256 for (uint i = 1; i < req(); i++) { 2257 Node* n = in(i); 2258 if (n != NULL && n->is_Phi()) { 2259 for (uint j = 1; j < n->req(); j++) { 2260 if (n->in(j) == this) { 2261 return true; 2262 } 2263 } 2264 } 2265 } 2266 } 2267 return false; 2268 } 2269 2270 //--------------------------find_similar------------------------------ 2271 // Return a node with opcode "opc" and same inputs as "this" if one can 2272 // be found; Otherwise return NULL; 2273 Node* Node::find_similar(int opc) { 2274 if (req() >= 2) { 2275 Node* def = in(1); 2276 if (def && def->outcnt() >= 2) { 2277 for (DUIterator_Fast dmax, i = def->fast_outs(dmax); i < dmax; i++) { 2278 Node* use = def->fast_out(i); 2279 if (use->Opcode() == opc && 2280 use->req() == req()) { 2281 uint j; 2282 for (j = 0; j < use->req(); j++) { 2283 if (use->in(j) != in(j)) { 2284 break; 2285 } 2286 } 2287 if (j == use->req()) { 2288 return use; 2289 } 2290 } 2291 } 2292 } 2293 } 2294 return NULL; 2295 } 2296 2297 2298 //--------------------------unique_ctrl_out------------------------------ 2299 // Return the unique control out if only one. Null if none or more than one. 
2300 Node* Node::unique_ctrl_out() const { 2301 Node* found = NULL; 2302 for (uint i = 0; i < outcnt(); i++) { 2303 Node* use = raw_out(i); 2304 if (use->is_CFG() && use != this) { 2305 if (found != NULL) return NULL; 2306 found = use; 2307 } 2308 } 2309 return found; 2310 } 2311 2312 void Node::ensure_control_or_add_prec(Node* c) { 2313 if (in(0) == NULL) { 2314 set_req(0, c); 2315 } else if (in(0) != c) { 2316 add_prec(c); 2317 } 2318 } 2319 2320 //============================================================================= 2321 //------------------------------yank------------------------------------------- 2322 // Find and remove 2323 void Node_List::yank( Node *n ) { 2324 uint i; 2325 for( i = 0; i < _cnt; i++ ) 2326 if( _nodes[i] == n ) 2327 break; 2328 2329 if( i < _cnt ) 2330 _nodes[i] = _nodes[--_cnt]; 2331 } 2332 2333 //------------------------------dump------------------------------------------- 2334 void Node_List::dump() const { 2335 #ifndef PRODUCT 2336 for( uint i = 0; i < _cnt; i++ ) 2337 if( _nodes[i] ) { 2338 tty->print("%5d--> ",i); 2339 _nodes[i]->dump(); 2340 } 2341 #endif 2342 } 2343 2344 //============================================================================= 2345 //------------------------------remove----------------------------------------- 2346 void Unique_Node_List::remove( Node *n ) { 2347 if( _in_worklist[n->_idx] ) { 2348 for( uint i = 0; i < size(); i++ ) 2349 if( _nodes[i] == n ) { 2350 map(i,Node_List::pop()); 2351 _in_worklist >>= n->_idx; 2352 return; 2353 } 2354 ShouldNotReachHere(); 2355 } 2356 } 2357 2358 //-----------------------remove_useless_nodes---------------------------------- 2359 // Remove useless nodes from worklist 2360 void Unique_Node_List::remove_useless_nodes(VectorSet &useful) { 2361 2362 for( uint i = 0; i < size(); ++i ) { 2363 Node *n = at(i); 2364 assert( n != NULL, "Did not expect null entries in worklist"); 2365 if( ! 
useful.test(n->_idx) ) { 2366 _in_worklist >>= n->_idx; 2367 map(i,Node_List::pop()); 2368 // Node *replacement = Node_List::pop(); 2369 // if( i != size() ) { // Check if removing last entry 2370 // _nodes[i] = replacement; 2371 // } 2372 --i; // Visit popped node 2373 // If it was last entry, loop terminates since size() was also reduced 2374 } 2375 } 2376 } 2377 2378 //============================================================================= 2379 void Node_Stack::grow() { 2380 size_t old_top = pointer_delta(_inode_top,_inodes,sizeof(INode)); // save _top 2381 size_t old_max = pointer_delta(_inode_max,_inodes,sizeof(INode)); 2382 size_t max = old_max << 1; // max * 2 2383 _inodes = REALLOC_ARENA_ARRAY(_a, INode, _inodes, old_max, max); 2384 _inode_max = _inodes + max; 2385 _inode_top = _inodes + old_top; // restore _top 2386 } 2387 2388 // Node_Stack is used to map nodes. 2389 Node* Node_Stack::find(uint idx) const { 2390 uint sz = size(); 2391 for (uint i=0; i < sz; i++) { 2392 if (idx == index_at(i) ) 2393 return node_at(i); 2394 } 2395 return NULL; 2396 } 2397 2398 //============================================================================= 2399 uint TypeNode::size_of() const { return sizeof(*this); } 2400 #ifndef PRODUCT 2401 void TypeNode::dump_spec(outputStream *st) const { 2402 if( !Verbose && !WizardMode ) { 2403 // standard dump does this in Verbose and WizardMode 2404 st->print(" #"); _type->dump_on(st); 2405 } 2406 } 2407 2408 void TypeNode::dump_compact_spec(outputStream *st) const { 2409 st->print("#"); 2410 _type->dump_on(st); 2411 } 2412 #endif 2413 uint TypeNode::hash() const { 2414 return Node::hash() + _type->hash(); 2415 } 2416 uint TypeNode::cmp( const Node &n ) const 2417 { return !Type::cmp( _type, ((TypeNode&)n)._type ); } 2418 const Type *TypeNode::bottom_type() const { return _type; } 2419 const Type *TypeNode::Value( PhaseTransform * ) const { return _type; } 2420 2421 
//------------------------------ideal_reg-------------------------------------- 2422 uint TypeNode::ideal_reg() const { 2423 return _type->ideal_reg(); 2424 }