1 /* 2 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/regmask.hpp"
#include "opto/type.hpp"
#include "utilities/copy.hpp"
#include "utilities/macros.hpp"

class RegMask;
// #include "phase.hpp"
class PhaseTransform;
class PhaseGVN;

// Sentinel register number for nodes not mapped to a machine register.
// NOTE(review): the original comment here ("Arena we are currently building
// Nodes in") looks stale/misplaced for this constant — confirm against history.
// Arena we are currently building Nodes in
const uint Node::NotAMachineReg = 0xffff0000;

#ifndef PRODUCT
extern int nodes_created;
#endif
#ifdef __clang__
// The constructors below intentionally read _in before every slot is
// initialized; suppress the resulting -Wuninitialized noise under clang.
#pragma clang diagnostic push
#pragma GCC diagnostic ignored "-Wuninitialized"
#endif

#ifdef ASSERT

//-------------------------- construct_node------------------------------------
// Set a breakpoint here to identify where a particular node index is built.
// Debug-only hook run for every freshly constructed Node: assigns a globally
// unique _debug_idx (whose low digits mirror _idx), enforces node-count
// limits, honors the BreakAtNode flag, and resets per-node assertion state.
void Node::verify_construction() {
  _debug_orig = NULL;
  int old_debug_idx = Compile::debug_idx();
  int new_debug_idx = old_debug_idx+1;
  if (new_debug_idx > 0) {
    // Arrange that the lowest five decimal digits of _debug_idx
    // will repeat those of _idx. In case this is somehow pathological,
    // we continue to assign negative numbers (!) consecutively.
    const int mod = 100000;
    int bump = (int)(_idx - new_debug_idx) % mod;
    if (bump < 0) bump += mod;
    assert(bump >= 0 && bump < mod, "");
    new_debug_idx += bump;
  }
  Compile::set_debug_idx(new_debug_idx);
  set_debug_idx( new_debug_idx );
  assert(Compile::current()->unique() < (INT_MAX - 1), "Node limit exceeded INT_MAX");
  assert(Compile::current()->live_nodes() < Compile::current()->max_node_limit(), "Live Node limit exceeded limit");
  // Allow breaking on either numbering scheme (persistent debug idx or
  // per-compile idx).
  if (BreakAtNode != 0 && (_debug_idx == BreakAtNode || (int)_idx == BreakAtNode)) {
    tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d", _idx, _debug_idx);
    BREAKPOINT;
  }
#if OPTO_DU_ITERATOR_ASSERT
  _last_del = NULL;
  _del_tick = 0;
#endif
  _hash_lock = 0;
}


// #ifdef ASSERT ...

#if OPTO_DU_ITERATOR_ASSERT
// Snapshot the node's out-edge state (_outcnt, _del_tick) so later verify()
// calls can detect unexpected insertions/deletions during iteration.
void DUIterator_Common::sample(const Node* node) {
  _vdui = VerifyDUIterators;
  _node = node;
  _outcnt = node->_outcnt;
  _del_tick = node->_del_tick;
  _last = NULL;
}

// Check that the iterator is still bound to the same node and that no
// out-edge deletions have occurred since the last sample/resync.
// (at_end_ok is accepted for interface symmetry but unused here.)
void DUIterator_Common::verify(const Node* node, bool at_end_ok) {
  assert(_node == node, "consistent iterator source");
  assert(_del_tick == node->_del_tick, "no unexpected deletions allowed");
}

void DUIterator_Common::verify_resync() {
  // Ensure that the loop body has just deleted the last guy produced.
  const Node* node = _node;
  // Ensure that at least one copy of the last-seen edge was deleted.
  // Note: It is OK to delete multiple copies of the last-seen edge.
  // Unfortunately, we have no way to verify that all the deletions delete
  // that same edge. On this point we must use the Honor System.
  assert(node->_del_tick >= _del_tick+1, "must have deleted an edge");
  assert(node->_last_del == _last, "must have deleted the edge just produced");
  // We liked this deletion, so accept the resulting outcnt and tick.
  _outcnt = node->_outcnt;
  _del_tick = node->_del_tick;
}

// Re-seat this iterator on the node 'that' is iterating (used when an
// iterator variable is assigned from another iterator).
void DUIterator_Common::reset(const DUIterator_Common& that) {
  if (this == &that) return; // ignore assignment to self
  if (!_vdui) {
    // We need to initialize everything, overwriting garbage values.
    _last = that._last;
    _vdui = that._vdui;
  }
  // Note: It is legal (though odd) for an iterator over some node x
  // to be reassigned to iterate over another node y. Some doubly-nested
  // progress loops depend on being able to do this.
  const Node* node = that._node;
  // Re-initialize everything, except _last.
  _node = node;
  _outcnt = node->_outcnt;
  _del_tick = node->_del_tick;
}

void DUIterator::sample(const Node* node) {
  DUIterator_Common::sample(node);      // Initialize the assertion data.
  _refresh_tick = 0;                    // No refreshes have happened, as yet.
}

// Verify the index is within range; at_end_ok permits the one-past-the-end
// position reached at loop exit.
void DUIterator::verify(const Node* node, bool at_end_ok) {
  DUIterator_Common::verify(node, at_end_ok);
  assert(_idx < node->_outcnt + (uint)at_end_ok, "idx in range");
}

void DUIterator::verify_increment() {
  if (_refresh_tick & 1) {
    // We have refreshed the index during this loop.
    // Fix up _idx to meet asserts.
    if (_idx > _outcnt) _idx = _outcnt;
  }
  verify(_node, true);
}

void DUIterator::verify_resync() {
  // Note: We do not assert on _outcnt, because insertions are OK here.
  DUIterator_Common::verify_resync();
  // Make sure we are still in sync, possibly with no more out-edges:
  verify(_node, true);
}

// An indexed iterator may only be assigned from a fresh Node::outs() result;
// the low bit of _refresh_tick is a "was refreshed this pass" flag.
void DUIterator::reset(const DUIterator& that) {
  if (this == &that) return; // self assignment is always a no-op
  assert(that._refresh_tick == 0, "assign only the result of Node::outs()");
  assert(that._idx == 0, "assign only the result of Node::outs()");
  assert(_idx == that._idx, "already assigned _idx");
  if (!_vdui) {
    // We need to initialize everything, overwriting garbage values.
    sample(that._node);
  } else {
    DUIterator_Common::reset(that);
    if (_refresh_tick & 1) {
      _refresh_tick++;                  // Clear the "was refreshed" flag.
    }
    assert(_refresh_tick < 2*100000, "DU iteration must converge quickly");
  }
}

void DUIterator::refresh() {
  DUIterator_Common::sample(_node);     // Re-fetch assertion data.
  _refresh_tick |= 1;                   // Set the "was refreshed" flag.
}

void DUIterator::verify_finish() {
  // If the loop has killed the node, do not require it to re-run.
  if (_node->_outcnt == 0) _refresh_tick &= ~1;
  // If this assert triggers, it means that a loop used refresh_out_pos
  // to re-synch an iteration index, but the loop did not correctly
  // re-run itself, using a "while (progress)" construct.
  // This iterator enforces the rule that you must keep trying the loop
  // until it "runs clean" without any need for refreshing.
  assert(!(_refresh_tick & 1), "the loop must run once with no refreshing");
}


// Pointer-based iterator: verify _outp stays inside the node's out array.
void DUIterator_Fast::verify(const Node* node, bool at_end_ok) {
  DUIterator_Common::verify(node, at_end_ok);
  Node** out = node->_out;
  uint cnt = node->_outcnt;
  assert(cnt == _outcnt, "no insertions allowed");
  assert(_outp >= out && _outp <= out + cnt - !at_end_ok, "outp in range");
  // This last check is carefully designed to work for NO_OUT_ARRAY.
}

void DUIterator_Fast::verify_limit() {
  const Node* node = _node;
  verify(node, true);
  assert(_outp == node->_out + node->_outcnt, "limit still correct");
}

void DUIterator_Fast::verify_resync() {
  const Node* node = _node;
  if (_outp == node->_out + _outcnt) {
    // Note that the limit imax, not the pointer i, gets updated with the
    // exact count of deletions. (For the pointer it's always "--i".)
    assert(node->_outcnt+node->_del_tick == _outcnt+_del_tick, "no insertions allowed with deletion(s)");
    // This is a limit pointer, with a name like "imax".
    // Fudge the _last field so that the common assert will be happy.
    _last = (Node*) node->_last_del;
    DUIterator_Common::verify_resync();
  } else {
    assert(node->_outcnt < _outcnt, "no insertions allowed with deletion(s)");
    // A normal internal pointer.
    DUIterator_Common::verify_resync();
    // Make sure we are still in sync, possibly with no more out-edges:
    verify(node, true);
  }
}

// Called when the loop adjusts its limit by n (imax -= n) after n deletions.
void DUIterator_Fast::verify_relimit(uint n) {
  const Node* node = _node;
  assert((int)n > 0, "use imax -= n only with a positive count");
  // This must be a limit pointer, with a name like "imax".
  assert(_outp == node->_out + node->_outcnt, "apply -= only to a limit (imax)");
  // The reported number of deletions must match what the node saw.
  assert(node->_del_tick == _del_tick + n, "must have deleted n edges");
  // Fudge the _last field so that the common assert will be happy.
  _last = (Node*) node->_last_del;
  DUIterator_Common::verify_resync();
}

void DUIterator_Fast::reset(const DUIterator_Fast& that) {
  assert(_outp == that._outp, "already assigned _outp");
  DUIterator_Common::reset(that);
}

// Backward iterator: _outp must always point at the last live out-edge.
void DUIterator_Last::verify(const Node* node, bool at_end_ok) {
  // at_end_ok means the _outp is allowed to underflow by 1
  _outp += at_end_ok;
  DUIterator_Fast::verify(node, at_end_ok);  // check _del_tick, etc.
  _outp -= at_end_ok;
  assert(_outp == (node->_out + node->_outcnt) - 1, "pointer must point to end of nodes");
}

void DUIterator_Last::verify_limit() {
  // Do not require the limit address to be resynched.
  //verify(node, true);
  assert(_outp == _node->_out, "limit still correct");
}

// Account for num_edges deletions performed by the loop body in one step.
void DUIterator_Last::verify_step(uint num_edges) {
  assert((int)num_edges > 0, "need non-zero edge count for loop progress");
  _outcnt   -= num_edges;
  _del_tick += num_edges;
  // Make sure we are still in sync, possibly with no more out-edges:
  const Node* node = _node;
  verify(node, true);
  assert(node->_last_del == _last, "must have deleted the edge just produced");
}

#endif //OPTO_DU_ITERATOR_ASSERT


#endif //ASSERT


// This constant used to initialize _out may be any non-null value.
// The value NULL is reserved for the top node only.
#define NO_OUT_ARRAY ((Node**)-1)

// Out-of-line code from node constructors.
// Executed only when extra debug info. is being passed around.
static void init_node_notes(Compile* C, int idx, Node_Notes* nn) {
  C->set_node_notes_at(idx, nn);
}

// Shared initialization code.
// Allocates the _in edge array from the compile's node arena, captures any
// default node notes, initializes counters/flags, and returns the node's
// freshly assigned unique index (used to initialize _idx in the ctors).
inline int Node::Init(int req) {
  Compile* C = Compile::current();
  int idx = C->next_unique();

  // Allocate memory for the necessary number of edges.
  if (req > 0) {
    // Allocate space for _in array to have double alignment.
    _in = (Node **) ((char *) (C->node_arena()->Amalloc_D(req * sizeof(void*))));
  }
  // If there are default notes floating around, capture them:
  Node_Notes* nn = C->default_node_notes();
  if (nn != NULL) init_node_notes(C, idx, nn);

  // Note: At this point, C is dead,
  // and we begin to initialize the new Node.

  _cnt = _max = req;
  _outcnt = _outmax = 0;
  _class_id = Class_Node;
  _flags = 0;
  _out = NO_OUT_ARRAY;
  return idx;
}

//------------------------------Node-------------------------------------------
// Create a Node, with a given number of required edges (all initially NULL).
Node::Node(uint req)
  : _idx(Init(req))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  assert( req < Compile::current()->max_node_limit() - NodeLimitFudgeFactor, "Input limit exceeded" );
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  if (req == 0) {
    _in = NULL;
  } else {
    Node** to = _in;
    for(uint i = 0; i < req; i++) {
      to[i] = NULL;
    }
  }
}

//------------------------------Node-------------------------------------------
// One-input constructor; installs the input and its reciprocal out-edge.
Node::Node(Node *n0)
  : _idx(Init(1))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  assert( is_not_dead(n0), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
// Two-input constructor; installs inputs and their reciprocal out-edges.
Node::Node(Node *n0, Node *n1)
  : _idx(Init(2))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
// Three-input constructor; installs inputs and their reciprocal out-edges.
Node::Node(Node *n0, Node *n1, Node *n2)
  : _idx(Init(3))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
// Four-input constructor.
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3)
  : _idx(Init(4))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
// Five-input constructor.
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3, Node *n4)
  : _idx(Init(5))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  assert( is_not_dead(n4), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
  _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
// Six-input constructor.
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
           Node *n4, Node *n5)
  : _idx(Init(6))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  assert( is_not_dead(n4), "can not use dead node");
  assert( is_not_dead(n5), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
  _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
  _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
// Seven-input constructor.
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
           Node *n4, Node *n5, Node *n6)
  : _idx(Init(7))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  assert( is_not_dead(n4), "can not use dead node");
  assert( is_not_dead(n5), "can not use dead node");
  assert( is_not_dead(n6), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
  _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
  _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
  _in[6] = n6; if (n6 != NULL) n6->add_out((Node *)this);
}

#ifdef __clang__
#pragma clang diagnostic pop
#endif


//------------------------------clone------------------------------------------
// Clone a Node. Performs a shallow bitwise copy of the object plus a fresh
// _in array (duplicating every in-edge and its reciprocal out-edge), resets
// the out array, assigns a new unique index, and re-registers the clone with
// the compile-wide bookkeeping lists (macro/expensive/range-check/Opaque4,
// barrier set). MachNode operands and Call/SafePoint side state are deep-
// cloned as needed.
Node *Node::clone() const {
  Compile* C = Compile::current();
  uint s = size_of();           // Size of inherited Node
  // One allocation holds both the cloned object and its new _in array.
  Node *n = (Node*)C->node_arena()->Amalloc_D(size_of() + _max*sizeof(Node*));
  Copy::conjoint_words_to_lower((HeapWord*)this, (HeapWord*)n, s);
  // Set the new input pointer array
  n->_in = (Node**)(((char*)n)+s);
  // Cannot share the old output pointer array, so kill it
  n->_out = NO_OUT_ARRAY;
  // And reset the counters to 0
  n->_outcnt = 0;
  n->_outmax = 0;
  // Unlock this guy, since he is not in any hash table.
  debug_only(n->_hash_lock = 0);
  // Walk the old node's input list to duplicate its edges
  uint i;
  for( i = 0; i < len(); i++ ) {
    Node *x = in(i);
    n->_in[i] = x;
    if (x != NULL) x->add_out(n);
  }
  if (is_macro())
    C->add_macro_node(n);
  if (is_expensive())
    C->add_expensive_node(n);
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  bs->register_potential_barrier_node(n);
  // If the cloned node is a range check dependent CastII, add it to the list.
  CastIINode* cast = n->isa_CastII();
  if (cast != NULL && cast->has_range_check()) {
    C->add_range_check_cast(cast);
  }
  if (n->Opcode() == Op_Opaque4) {
    C->add_opaque4_node(n);
  }

  n->set_idx(C->next_unique()); // Get new unique index as well
  debug_only( n->verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Do not patch over the debug_idx of a clone, because it makes it
  // impossible to break on the clone's moment of creation.
  //debug_only( n->set_debug_idx( debug_idx() ) );

  C->copy_node_notes_to(n, (Node*) this);

  // MachNode clone
  uint nopnds;
  if (this->is_Mach() && (nopnds = this->as_Mach()->num_opnds()) > 0) {
    MachNode *mach  = n->as_Mach();
    MachNode *mthis = this->as_Mach();
    // Get address of _opnd_array.
    // It should be the same offset since it is the clone of this node.
    MachOper **from = mthis->_opnds;
    MachOper **to = (MachOper **)((size_t)(&mach->_opnds) +
                    pointer_delta((const void*)from,
                                  (const void*)(&mthis->_opnds), 1));
    mach->_opnds = to;
    for ( uint i = 0; i < nopnds; ++i ) {
      to[i] = from[i]->clone();
    }
  }
  // cloning CallNode may need to clone JVMState
  if (n->is_Call()) {
    n->as_Call()->clone_jvms(C);
  }
  if (n->is_SafePoint()) {
    n->as_SafePoint()->clone_replaced_nodes();
  }
  return n;                     // Return the clone
}

//---------------------------setup_is_top--------------------------------------
// Call this when changing the top node, to reassert the invariants
// required by Node::is_top. See Compile::set_cached_top_node.
// Invariant maintained here: _out == NULL is the marker reserved for the
// top node; every other node uses NO_OUT_ARRAY or a real out array.
void Node::setup_is_top() {
  if (this == (Node*)Compile::current()->top()) {
    // This node has just become top. Kill its out array.
    _outcnt = _outmax = 0;
    _out = NULL;                // marker value for top
    assert(is_top(), "must be top");
  } else {
    if (_out == NULL)  _out = NO_OUT_ARRAY;
    assert(!is_top(), "must not be top");
  }
}


//------------------------------~Node------------------------------------------
// Fancy destructor; eagerly attempt to reclaim Node numberings and storage
void Node::destruct() {
  // Eagerly reclaim unique Node numberings
  Compile* compile = Compile::current();
  if ((uint)_idx+1 == compile->unique()) {
    compile->set_unique(compile->unique()-1);
  }
  // Clear debug info:
  Node_Notes* nn = compile->node_notes_at(_idx);
  if (nn != NULL)  nn->clear();
  // Walk the input array, freeing the corresponding output edges
  _cnt = _max;  // forget req/prec distinction
  uint i;
  for( i = 0; i < _max; i++ ) {
    set_req(i, NULL);
    //assert(def->out(def->outcnt()-1) == (Node *)this,"bad def-use hacking in reclaim");
  }
  assert(outcnt() == 0, "deleting a node must not leave a dangling use");
  // See if the input array was allocated just prior to the object
  int edge_size = _max*sizeof(void*);
  int out_edge_size = _outmax*sizeof(void*);
  char *edge_end = ((char*)_in) + edge_size;
  char *out_array = (char*)(_out == NO_OUT_ARRAY? NULL: _out);
  int node_size = size_of();

  // Free the output edge array
  if (out_edge_size > 0) {
    compile->node_arena()->Afree(out_array, out_edge_size);
  }

  // Free the input edge array and the node itself
  if( edge_end == (char*)this ) {
    // It was; free the input array and object all in one hit
    // (Under ASSERT the storage is kept so the node can be poisoned below.)
#ifndef ASSERT
    compile->node_arena()->Afree(_in,edge_size+node_size);
#endif
  } else {
    // Free just the input array
    compile->node_arena()->Afree(_in,edge_size);

    // Free just the object
#ifndef ASSERT
    compile->node_arena()->Afree(this,node_size);
#endif
  }
  // Undo the compile-wide registrations performed at construction/clone time.
  if (is_macro()) {
    compile->remove_macro_node(this);
  }
  if (is_expensive()) {
    compile->remove_expensive_node(this);
  }
  CastIINode* cast = isa_CastII();
  if (cast != NULL && cast->has_range_check()) {
    compile->remove_range_check_cast(cast);
  }
  if (Opcode() == Op_Opaque4) {
    compile->remove_opaque4_node(this);
  }

  if (is_SafePoint()) {
    as_SafePoint()->delete_replaced_nodes();
  }
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  bs->unregister_potential_barrier_node(this);
#ifdef ASSERT
  // We will not actually delete the storage, but we'll make the node unusable.
  *(address*)this = badAddress;  // smash the C++ vtbl, probably
  _in = _out = (Node**) badAddress;
  _max = _cnt = _outmax = _outcnt = 0;
  compile->remove_modified_node(this);
#endif
}

//------------------------------grow-------------------------------------------
// Grow the input array, making space for more edges. Capacity grows to the
// next power of two above 'len'; new slots are NULL-filled.
void Node::grow( uint len ) {
  Arena* arena = Compile::current()->node_arena();
  uint new_max = _max;
  if( new_max == 0 ) {
    _max = 4;
    _in = (Node**)arena->Amalloc(4*sizeof(Node*));
    Node** to = _in;
    to[0] = NULL;
    to[1] = NULL;
    to[2] = NULL;
    to[3] = NULL;
    return;
  }
  while( new_max <= len ) new_max <<= 1; // Find next power-of-2
  // Trimming to limit allows a uint8 to handle up to 255 edges.
  // Previously I was using only powers-of-2 which peaked at 128 edges.
  //if( new_max >= limit ) new_max = limit-1;
  _in = (Node**)arena->Arealloc(_in, _max*sizeof(Node*), new_max*sizeof(Node*));
  Copy::zero_to_bytes(&_in[_max], (new_max-_max)*sizeof(Node*)); // NULL all new space
  _max = new_max;               // Record new max length
  // This assertion makes sure that Node::_max is wide enough to
  // represent the numerical value of new_max.
  assert(_max == new_max && _max > len, "int width of _max is too small");
}

//-----------------------------out_grow----------------------------------------
// Grow the output (use) array, making space for more edges. Unlike grow(),
// new slots are NOT zeroed (the commented-out line below).
void Node::out_grow( uint len ) {
  assert(!is_top(), "cannot grow a top node's out array");
  Arena* arena = Compile::current()->node_arena();
  uint new_max = _outmax;
  if( new_max == 0 ) {
    _outmax = 4;
    _out = (Node **)arena->Amalloc(4*sizeof(Node*));
    return;
  }
  while( new_max <= len ) new_max <<= 1; // Find next power-of-2
  // Trimming to limit allows a uint8 to handle up to 255 edges.
  // Previously I was using only powers-of-2 which peaked at 128 edges.
  //if( new_max >= limit ) new_max = limit-1;
  assert(_out != NULL && _out != NO_OUT_ARRAY, "out must have sensible value");
  _out = (Node**)arena->Arealloc(_out,_outmax*sizeof(Node*),new_max*sizeof(Node*));
  //Copy::zero_to_bytes(&_out[_outmax], (new_max-_outmax)*sizeof(Node*)); // NULL all new space
  _outmax = new_max;               // Record new max length
  // This assertion makes sure that Node::_max is wide enough to
  // represent the numerical value of new_max.
  assert(_outmax == new_max && _outmax > len, "int width of _outmax is too small");
}

#ifdef ASSERT
//------------------------------is_dead----------------------------------------
// Debug-only heuristic: a node is considered dead when every input slot is
// NULL (Mach nodes, top, and used pinch points are exempted). Dumps the node
// before reporting it dead.
bool Node::is_dead() const {
  // Mach and pinch point nodes may look like dead.
  if( is_top() || is_Mach() || (Opcode() == Op_Node && _outcnt > 0) )
    return false;
  for( uint i = 0; i < _max; i++ )
    if( _in[i] != NULL )
      return false;
  dump();
  return true;
}
#endif


//------------------------------is_unreachable---------------------------------
// True when this node can no longer affect the graph: no uses, TOP type,
// or a dead (top) control input.
bool Node::is_unreachable(PhaseIterGVN &igvn) const {
  assert(!is_Mach(), "doesn't work with MachNodes");
  return outcnt() == 0 || igvn.type(this) == Type::TOP || (in(0) != NULL && in(0)->is_top());
}

//------------------------------add_req----------------------------------------
// Add a new required input at the end. Precedence edges live after the
// required edges in _in, so one prec edge may need to be slid up to make room.
void Node::add_req( Node *n ) {
  assert( is_not_dead(n), "can not use dead node");

  // Look to see if I can move precedence down one without reallocating
  if( (_cnt >= _max) || (in(_max-1) != NULL) )
    grow( _max+1 );

  // Find a precedence edge to move
  if( in(_cnt) != NULL ) {       // Next precedence edge is busy?
    uint i;
    for( i=_cnt; i<_max; i++ )
      if( in(i) == NULL )        // Find the NULL at end of prec edge list
        break;                   // There must be one, since we grew the array
    _in[i] = in(_cnt);           // Move prec over, making space for req edge
  }
  _in[_cnt++] = n;               // Stuff over old prec edge
  if (n != NULL) n->add_out((Node *)this);
}

//---------------------------add_req_batch-------------------------------------
// Add the same required input 'n' m times at the end (bulk form of add_req).
void Node::add_req_batch( Node *n, uint m ) {
  assert( is_not_dead(n), "can not use dead node");
  // check various edge cases
  if ((int)m <= 1) {
    assert((int)m >= 0, "oob");
    if (m != 0) add_req(n);
    return;
  }

  // Look to see if I can move precedence down one without reallocating
  if( (_cnt+m) > _max || _in[_max-m] )
    grow( _max+m );

  // Find a precedence edge to move
  if( _in[_cnt] != NULL ) {      // Next precedence edge is busy?
    uint i;
    for( i=_cnt; i<_max; i++ )
      if( _in[i] == NULL )       // Find the NULL at end of prec edge list
        break;                   // There must be one, since we grew the array
    // Slide all the precs over by m positions (assume #prec << m).
    Copy::conjoint_words_to_higher((HeapWord*)&_in[_cnt], (HeapWord*)&_in[_cnt+m], ((i-_cnt)*sizeof(Node*)));
  }

  // Stuff over the old prec edges
  for(uint i=0; i<m; i++ ) {
    _in[_cnt++] = n;
  }

  // Insert multiple out edges on the node.
  if (n != NULL && !n->is_top()) {
    for(uint i=0; i<m; i++ ) {
      n->add_out((Node *)this);
    }
  }
}

//------------------------------del_req----------------------------------------
// Delete the required edge and compact the edge array (order NOT preserved:
// the last required edge is moved into the vacated slot).
void Node::del_req( uint idx ) {
  assert( idx < _cnt, "oob");
  assert( !VerifyHashTableKeys || _hash_lock == 0,
          "remove node from hash table before modifying it");
  // First remove corresponding def-use edge
  Node *n = in(idx);
  if (n != NULL) n->del_out((Node *)this);
  _in[idx] = in(--_cnt); // Compact the array
  // Avoid spec violation: Gap in prec edges.
  close_prec_gap_at(_cnt);
  Compile::current()->record_modified_node(this);
}

//------------------------------del_req_ordered--------------------------------
// Delete the required edge and compact the edge array with preserved order
void Node::del_req_ordered( uint idx ) {
  assert( idx < _cnt, "oob");
  assert( !VerifyHashTableKeys || _hash_lock == 0,
          "remove node from hash table before modifying it");
  // First remove corresponding def-use edge
  Node *n = in(idx);
  if (n != NULL) n->del_out((Node *)this);
  if (idx < --_cnt) {    // Not last edge ?
    Copy::conjoint_words_to_lower((HeapWord*)&_in[idx+1], (HeapWord*)&_in[idx], ((_cnt-idx)*sizeof(Node*)));
  }
  // Avoid spec violation: Gap in prec edges.
  close_prec_gap_at(_cnt);
  Compile::current()->record_modified_node(this);
}

//------------------------------ins_req----------------------------------------
// Insert a new required input at position idx, shifting later inputs up.
// (The header comment "at the end" is historical; the slide below inserts
// at idx.)
void Node::ins_req( uint idx, Node *n ) {
  assert( is_not_dead(n), "can not use dead node");
  add_req(NULL);                // Make space
  assert( idx < _max, "Must have allocated enough space");
  // Slide over
  if(_cnt-idx-1 > 0) {
    Copy::conjoint_words_to_higher((HeapWord*)&_in[idx], (HeapWord*)&_in[idx+1], ((_cnt-idx-1)*sizeof(Node*)));
  }
  _in[idx] = n;                            // Stuff over old required edge
  if (n != NULL) n->add_out((Node *)this); // Add reciprocal def-use edge
}

//-----------------------------find_edge---------------------------------------
// Return the index of the first input (required or prec) equal to n, or -1.
int Node::find_edge(Node* n) {
  for (uint i = 0; i < len(); i++) {
    if (_in[i] == n)  return i;
  }
  return -1;
}

//----------------------------replace_edge-------------------------------------
// Replace every input equal to 'old' with 'neww' (both required and prec
// edges); returns the number of replacements performed.
int Node::replace_edge(Node* old, Node* neww) {
  if (old == neww)  return 0;  // nothing to do
  uint nrep = 0;
  for (uint i = 0; i < len(); i++) {
    if (in(i) == old) {
      if (i < req()) {
        set_req(i, neww);
      } else {
        assert(find_prec_edge(neww) == -1, "spec violation: duplicated prec edge (node %d -> %d)", _idx, neww->_idx);
        set_prec(i, neww);
      }
      nrep++;
    }
  }
  return nrep;
}

/**
 * Replace input edges in the range pointing to 'old' node.
 * Only required edges are touched (uses set_req); returns the count replaced.
 */
int Node::replace_edges_in_range(Node* old, Node* neww, int start, int end) {
  if (old == neww)  return 0;  // nothing to do
  uint nrep = 0;
  for (int i = start; i < end; i++) {
    if (in(i) == old) {
      set_req(i, neww);
      nrep++;
    }
  }
  return nrep;
}

//-------------------------disconnect_inputs-----------------------------------
// NULL out all inputs to eliminate incoming Def-Use edges.
// Return the number of edges between 'n' and 'this'
int Node::disconnect_inputs(Node *n, Compile* C) {
  int edges_to_n = 0;

  uint cnt = req();
  for( uint i = 0; i < cnt; ++i ) {
    if( in(i) == 0 ) continue;
    if( in(i) == n ) ++edges_to_n;
    set_req(i, NULL);
  }
  // Remove precedence edges if any exist
  // Note: Safepoints may have precedence edges, even during parsing
  if( (req() != len()) && (in(req()) != NULL) ) {
    uint max = len();
    // Required slots were already nulled above, so the 'continue' skips them.
    for( uint i = 0; i < max; ++i ) {
      if( in(i) == 0 ) continue;
      if( in(i) == n ) ++edges_to_n;
      set_prec(i, NULL);
    }
  }

  // Node::destruct requires all out edges be deleted first
  // debug_only(destruct();)   // no reuse benefit expected
  if (edges_to_n == 0) {
    C->record_dead_node(_idx);
  }
  return edges_to_n;
}

//-----------------------------uncast---------------------------------------
// %%% Temporary, until we sort out CheckCastPP vs. CastPP.
// Strip away casting.  (It is depth-limited.)
Node* Node::uncast() const {
  // Should be inline:
  //return is_ConstraintCast() ? uncast_helper(this) : (Node*) this;
  if (is_ConstraintCast())
    return uncast_helper(this);
  else
    return (Node*) this;
}

// True when 'this' and 'n' refer to the same node once GC barriers and
// constraint casts are stripped from both sides.
bool Node::eqv_uncast(const Node* n) const {
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  Node* obj1 = bs->step_over_gc_barrier(const_cast<Node*>(this));
  Node* obj2 = bs->step_over_gc_barrier(const_cast<Node*>(n));
  return (obj1->uncast() == obj2->uncast());
}

// Find out of current node that matches opcode.
// Returns the first matching use, or NULL if none matches.
Node* Node::find_out_with(int opcode) {
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    Node* use = fast_out(i);
    if (use->Opcode() == opcode) {
      return use;
    }
  }
  return NULL;
}

// Return true if the current node has an out that matches opcode.
bool Node::has_out_with(int opcode) {
  return (find_out_with(opcode) != NULL);
}

// Return true if the current node has an out that matches any of the opcodes.
bool Node::has_out_with(int opcode1, int opcode2, int opcode3, int opcode4) {
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    int opcode = fast_out(i)->Opcode();
    if (opcode == opcode1 || opcode == opcode2 || opcode == opcode3 || opcode == opcode4) {
      return true;
    }
  }
  return false;
}


//---------------------------uncast_helper-------------------------------------
// Walk down a chain of ConstraintCast nodes (each has exactly one data
// input, req() == 2) and return the first non-cast node.  Depth-limited
// to K steps in debug builds to catch graph cycles.
Node* Node::uncast_helper(const Node* p) {
#ifdef ASSERT
  uint depth_count = 0;
  const Node* orig_p = p;
#endif

  while (true) {
#ifdef ASSERT
    if (depth_count >= K) {
      orig_p->dump(4);
      if (p != orig_p)
        p->dump(1);
    }
    assert(depth_count++ < K, "infinite loop in Node::uncast_helper");
#endif
    if (p == NULL || p->req() != 2) {
      break;
    } else if (p->is_ConstraintCast()) {
      p = p->in(1);
    } else {
      break;
    }
  }
  return (Node*) p;
}

//------------------------------add_prec---------------------------------------
// Add a new precedence input.  Precedence inputs are unordered, with
// duplicates removed and NULLs packed down at the end.
void Node::add_prec( Node *n ) {
  assert( is_not_dead(n), "can not use dead node");

  // Check for NULL at end
  if( _cnt >= _max || in(_max-1) )
    grow( _max+1 );

  // Find a precedence edge to move
  // (scan forward from the first prec slot to the first NULL).
  uint i = _cnt;
  while( in(i) != NULL ) {
    if (in(i) == n) return; // Avoid spec violation: duplicated prec edge.
    i++;
  }
  _in[i] = n;                                // Stuff prec edge over NULL
  if ( n != NULL) n->add_out((Node *)this);  // Add mirror edge

#ifdef ASSERT
  // All slots past the one just filled must still be NULL (no gaps).
  while ((++i)<_max) { assert(_in[i] == NULL, "spec violation: Gap in prec edges (node %d)", _idx); }
#endif
}

//------------------------------rm_prec----------------------------------------
// Remove a precedence input.  Precedence inputs are unordered, with
// duplicates removed and NULLs packed down at the end.
void Node::rm_prec( uint j ) {
  assert(j < _max, "oob: i=%d, _max=%d", j, _max);
  assert(j >= _cnt, "not a precedence edge");
  if (_in[j] == NULL) return;   // Avoid spec violation: Gap in prec edges.
  _in[j]->del_out((Node *)this);
  close_prec_gap_at(j);
}

//------------------------------size_of----------------------------------------
// Byte size of this node object; subclasses override to report their own size.
uint Node::size_of() const { return sizeof(*this); }

//------------------------------ideal_reg--------------------------------------
// Ideal register class for this node's result; 0 means "none" by default.
uint Node::ideal_reg() const { return 0; }

//------------------------------jvms-------------------------------------------
// Nodes carrying JVM state override this; plain nodes have none.
JVMState* Node::jvms() const { return NULL; }

#ifdef ASSERT
//------------------------------jvms-------------------------------------------
// Verify that 'using_jvms' appears somewhere on this node's JVMState
// caller chain.
bool Node::verify_jvms(const JVMState* using_jvms) const {
  for (JVMState* jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
    if (jvms == using_jvms) return true;
  }
  return false;
}

//------------------------------init_NodeProperty------------------------------
// Sanity-check that class and flag enumerations fit in a jushort.
void Node::init_NodeProperty() {
  assert(_max_classes <= max_jushort, "too many NodeProperty classes");
  assert(_max_flags <= max_jushort, "too many NodeProperty flags");
}
#endif

//------------------------------format-----------------------------------------
// Print as assembly
void Node::format( PhaseRegAlloc *, outputStream *st ) const {}
//------------------------------emit-------------------------------------------
// Emit bytes starting at parameter 'ptr'.
// Default is a no-op; machine nodes override.
void Node::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {}
//------------------------------size-------------------------------------------
// Size of instruction in bytes
uint Node::size(PhaseRegAlloc *ra_) const { return 0; }

//------------------------------CFG Construction-------------------------------
// Nodes that end basic blocks, e.g. IfTrue/IfFalse, JumpProjNode, Root,
// Goto and Return.
// Default: this node does not end a block.
const Node *Node::is_block_proj() const { return 0; }

// Minimum guaranteed type
const Type *Node::bottom_type() const { return Type::BOTTOM; }


//------------------------------raise_bottom_type------------------------------
// Get the worst-case Type output for this Node.
// Only Type and Load nodes carry a mutable type; for them, install
// 'new_type' (which must refine the old type when VerifyAliases checks).
void Node::raise_bottom_type(const Type* new_type) {
  if (is_Type()) {
    TypeNode *n = this->as_Type();
    if (VerifyAliases) {
      assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
    }
    n->set_type(new_type);
  } else if (is_Load()) {
    LoadNode *n = this->as_Load();
    if (VerifyAliases) {
      assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
    }
    n->set_type(new_type);
  }
}

//------------------------------Identity---------------------------------------
// Return a node that the given node is equivalent to.
Node* Node::Identity(PhaseGVN* phase) {
  return this;                  // Default to no identities
}

//------------------------------Value------------------------------------------
// Compute a new Type for a node using the Type of the inputs.
const Type* Node::Value(PhaseGVN* phase) const {
  return bottom_type();         // Default to worst-case Type
}

//------------------------------Ideal------------------------------------------
//
// 'Idealize' the graph rooted at this Node.
//
// In order to be efficient and flexible there are some subtle invariants
// these Ideal calls need to hold.  Running with '+VerifyIterativeGVN' checks
// these invariants, although its too slow to have on by default.  If you are
// hacking an Ideal call, be sure to test with +VerifyIterativeGVN!
//
// The Ideal call almost arbitrarily reshape the graph rooted at the 'this'
// pointer.  If ANY change is made, it must return the root of the reshaped
// graph - even if the root is the same Node.  Example: swapping the inputs
// to an AddINode gives the same answer and same root, but you still have to
// return the 'this' pointer instead of NULL.
//
// You cannot return an OLD Node, except for the 'this' pointer.  Use the
// Identity call to return an old Node; basically if Identity can find
// another Node have the Ideal call make no change and return NULL.
// Example: AddINode::Ideal must check for add of zero; in this case it
// returns NULL instead of doing any graph reshaping.
//
// You cannot modify any old Nodes except for the 'this' pointer.  Due to
// sharing there may be other users of the old Nodes relying on their current
// semantics.  Modifying them will break the other users.
// Example: when reshape "(X+3)+4" into "X+7" you must leave the Node for
// "X+3" unchanged in case it is shared.
//
// If you modify the 'this' pointer's inputs, you should use
// 'set_req'.  If you are making a new Node (either as the new root or
// some new internal piece) you may use 'init_req' to set the initial
// value.  You can make a new Node with either 'new' or 'clone'.  In
// either case, def-use info is correctly maintained.
//
// Example: reshape "(X+3)+4" into "X+7":
//    set_req(1, in(1)->in(1));
//    set_req(2, phase->intcon(7));
//    return this;
// Example: reshape "X*4" into "X<<2"
//    return new LShiftINode(in(1), phase->intcon(2));
//
// You must call 'phase->transform(X)' on any new Nodes X you make, except
// for the returned root node.  Example: reshape "X*31" with "(X<<5)-X".
//    Node *shift=phase->transform(new LShiftINode(in(1),phase->intcon(5)));
//    return new AddINode(shift, in(1));
//
// When making a Node for a constant use 'phase->makecon' or 'phase->intcon'.
// These forms are faster than 'phase->transform(new ConNode())' and Do
// The Right Thing with def-use info.
//
// You cannot bury the 'this' Node inside of a graph reshape.  If the reshaped
// graph uses the 'this' Node it must be the root.  If you want a Node with
// the same Opcode as the 'this' pointer use 'clone'.
//
Node *Node::Ideal(PhaseGVN *phase, bool can_reshape) {
  return NULL;                  // Default to being Ideal already
}

// Some nodes have specific Ideal subgraph transformations only if they are
// unique users of specific nodes. Such nodes should be put on IGVN worklist
// for the transformations to happen.
bool Node::has_special_unique_user() const {
  assert(outcnt() == 1, "match only for unique out");
  Node* n = unique_out();
  int op = Opcode();
  if (this->is_Store()) {
    // Condition for back-to-back stores folding.
    return n->Opcode() == op && n->in(MemNode::Memory) == this;
  } else if (this->is_Load() || this->is_DecodeN() || this->is_Phi()) {
    // Condition for removing an unused LoadNode or DecodeNNode from the MemBarAcquire precedence input
    return n->Opcode() == Op_MemBarAcquire;
  } else if (op == Op_AddL) {
    // Condition for convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y))
    return n->Opcode() == Op_ConvL2I && n->in(1) == this;
  } else if (op == Op_SubI || op == Op_SubL) {
    // Condition for subI(x,subI(y,z)) ==> subI(addI(x,z),y)
    return n->Opcode() == op && n->in(2) == this;
  } else if (is_If() && (n->is_IfFalse() || n->is_IfTrue())) {
    // See IfProjNode::Identity()
    return true;
  }
  return false;
};

//--------------------------find_exact_control---------------------------------
// Skip Proj and CatchProj nodes chains. Check for Null and Top.
Node* Node::find_exact_control(Node* ctrl) {
  if (ctrl == NULL && this->is_Region())
    ctrl = this->as_Region()->is_copy();

  if (ctrl != NULL && ctrl->is_CatchProj()) {
    if (ctrl->as_CatchProj()->_con == CatchProjNode::fall_through_index)
      ctrl = ctrl->in(0);
    if (ctrl != NULL && !ctrl->is_top())
      ctrl = ctrl->in(0);
  }

  if (ctrl != NULL && ctrl->is_Proj())
    ctrl = ctrl->in(0);

  return ctrl;
}

//--------------------------dominates------------------------------------------
// Helper function for MemNode::all_controls_dominate().
// Check if 'this' control node dominates or equal to 'sub' control node.
// We already know that if any path back to Root or Start reaches 'this',
// then all paths do, so this is a simple search for one example,
// not an exhaustive search for a counterexample.
1181 bool Node::dominates(Node* sub, Node_List &nlist) { 1182 assert(this->is_CFG(), "expecting control"); 1183 assert(sub != NULL && sub->is_CFG(), "expecting control"); 1184 1185 // detect dead cycle without regions 1186 int iterations_without_region_limit = DominatorSearchLimit; 1187 1188 Node* orig_sub = sub; 1189 Node* dom = this; 1190 bool met_dom = false; 1191 nlist.clear(); 1192 1193 // Walk 'sub' backward up the chain to 'dom', watching for regions. 1194 // After seeing 'dom', continue up to Root or Start. 1195 // If we hit a region (backward split point), it may be a loop head. 1196 // Keep going through one of the region's inputs. If we reach the 1197 // same region again, go through a different input. Eventually we 1198 // will either exit through the loop head, or give up. 1199 // (If we get confused, break out and return a conservative 'false'.) 1200 while (sub != NULL) { 1201 if (sub->is_top()) break; // Conservative answer for dead code. 1202 if (sub == dom) { 1203 if (nlist.size() == 0) { 1204 // No Region nodes except loops were visited before and the EntryControl 1205 // path was taken for loops: it did not walk in a cycle. 1206 return true; 1207 } else if (met_dom) { 1208 break; // already met before: walk in a cycle 1209 } else { 1210 // Region nodes were visited. Continue walk up to Start or Root 1211 // to make sure that it did not walk in a cycle. 1212 met_dom = true; // first time meet 1213 iterations_without_region_limit = DominatorSearchLimit; // Reset 1214 } 1215 } 1216 if (sub->is_Start() || sub->is_Root()) { 1217 // Success if we met 'dom' along a path to Start or Root. 1218 // We assume there are no alternative paths that avoid 'dom'. 1219 // (This assumption is up to the caller to ensure!) 1220 return met_dom; 1221 } 1222 Node* up = sub->in(0); 1223 // Normalize simple pass-through regions and projections: 1224 up = sub->find_exact_control(up); 1225 // If sub == up, we found a self-loop. Try to push past it. 
1226 if (sub == up && sub->is_Loop()) { 1227 // Take loop entry path on the way up to 'dom'. 1228 up = sub->in(1); // in(LoopNode::EntryControl); 1229 } else if (sub == up && sub->is_Region() && sub->req() != 3) { 1230 // Always take in(1) path on the way up to 'dom' for clone regions 1231 // (with only one input) or regions which merge > 2 paths 1232 // (usually used to merge fast/slow paths). 1233 up = sub->in(1); 1234 } else if (sub == up && sub->is_Region()) { 1235 // Try both paths for Regions with 2 input paths (it may be a loop head). 1236 // It could give conservative 'false' answer without information 1237 // which region's input is the entry path. 1238 iterations_without_region_limit = DominatorSearchLimit; // Reset 1239 1240 bool region_was_visited_before = false; 1241 // Was this Region node visited before? 1242 // If so, we have reached it because we accidentally took a 1243 // loop-back edge from 'sub' back into the body of the loop, 1244 // and worked our way up again to the loop header 'sub'. 1245 // So, take the first unexplored path on the way up to 'dom'. 1246 for (int j = nlist.size() - 1; j >= 0; j--) { 1247 intptr_t ni = (intptr_t)nlist.at(j); 1248 Node* visited = (Node*)(ni & ~1); 1249 bool visited_twice_already = ((ni & 1) != 0); 1250 if (visited == sub) { 1251 if (visited_twice_already) { 1252 // Visited 2 paths, but still stuck in loop body. Give up. 1253 return false; 1254 } 1255 // The Region node was visited before only once. 1256 // (We will repush with the low bit set, below.) 1257 nlist.remove(j); 1258 // We will find a new edge and re-insert. 1259 region_was_visited_before = true; 1260 break; 1261 } 1262 } 1263 1264 // Find an incoming edge which has not been seen yet; walk through it. 1265 assert(up == sub, ""); 1266 uint skip = region_was_visited_before ? 
1 : 0; 1267 for (uint i = 1; i < sub->req(); i++) { 1268 Node* in = sub->in(i); 1269 if (in != NULL && !in->is_top() && in != sub) { 1270 if (skip == 0) { 1271 up = in; 1272 break; 1273 } 1274 --skip; // skip this nontrivial input 1275 } 1276 } 1277 1278 // Set 0 bit to indicate that both paths were taken. 1279 nlist.push((Node*)((intptr_t)sub + (region_was_visited_before ? 1 : 0))); 1280 } 1281 1282 if (up == sub) { 1283 break; // some kind of tight cycle 1284 } 1285 if (up == orig_sub && met_dom) { 1286 // returned back after visiting 'dom' 1287 break; // some kind of cycle 1288 } 1289 if (--iterations_without_region_limit < 0) { 1290 break; // dead cycle 1291 } 1292 sub = up; 1293 } 1294 1295 // Did not meet Root or Start node in pred. chain. 1296 // Conservative answer for dead code. 1297 return false; 1298 } 1299 1300 //------------------------------remove_dead_region----------------------------- 1301 // This control node is dead. Follow the subgraph below it making everything 1302 // using it dead as well. This will happen normally via the usual IterGVN 1303 // worklist but this call is more efficient. Do not update use-def info 1304 // inside the dead region, just at the borders. 1305 static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) { 1306 // Con's are a popular node to re-hit in the hash table again. 1307 if( dead->is_Con() ) return; 1308 1309 // Can't put ResourceMark here since igvn->_worklist uses the same arena 1310 // for verify pass with +VerifyOpto and we add/remove elements in it here. 1311 Node_List nstack(Thread::current()->resource_area()); 1312 1313 Node *top = igvn->C->top(); 1314 nstack.push(dead); 1315 bool has_irreducible_loop = igvn->C->has_irreducible_loop(); 1316 1317 while (nstack.size() > 0) { 1318 dead = nstack.pop(); 1319 if (dead->outcnt() > 0) { 1320 // Keep dead node on stack until all uses are processed. 1321 nstack.push(dead); 1322 // For all Users of the Dead... 
;-) 1323 for (DUIterator_Last kmin, k = dead->last_outs(kmin); k >= kmin; ) { 1324 Node* use = dead->last_out(k); 1325 igvn->hash_delete(use); // Yank from hash table prior to mod 1326 if (use->in(0) == dead) { // Found another dead node 1327 assert (!use->is_Con(), "Control for Con node should be Root node."); 1328 use->set_req(0, top); // Cut dead edge to prevent processing 1329 nstack.push(use); // the dead node again. 1330 } else if (!has_irreducible_loop && // Backedge could be alive in irreducible loop 1331 use->is_Loop() && !use->is_Root() && // Don't kill Root (RootNode extends LoopNode) 1332 use->in(LoopNode::EntryControl) == dead) { // Dead loop if its entry is dead 1333 use->set_req(LoopNode::EntryControl, top); // Cut dead edge to prevent processing 1334 use->set_req(0, top); // Cut self edge 1335 nstack.push(use); 1336 } else { // Else found a not-dead user 1337 // Dead if all inputs are top or null 1338 bool dead_use = !use->is_Root(); // Keep empty graph alive 1339 for (uint j = 1; j < use->req(); j++) { 1340 Node* in = use->in(j); 1341 if (in == dead) { // Turn all dead inputs into TOP 1342 use->set_req(j, top); 1343 } else if (in != NULL && !in->is_top()) { 1344 dead_use = false; 1345 } 1346 } 1347 if (dead_use) { 1348 if (use->is_Region()) { 1349 use->set_req(0, top); // Cut self edge 1350 } 1351 nstack.push(use); 1352 } else { 1353 igvn->_worklist.push(use); 1354 } 1355 } 1356 // Refresh the iterator, since any number of kills might have happened. 1357 k = dead->last_outs(kmin); 1358 } 1359 } else { // (dead->outcnt() == 0) 1360 // Done with outputs. 
1361 igvn->hash_delete(dead); 1362 igvn->_worklist.remove(dead); 1363 igvn->C->remove_modified_node(dead); 1364 igvn->set_type(dead, Type::TOP); 1365 if (dead->is_macro()) { 1366 igvn->C->remove_macro_node(dead); 1367 } 1368 if (dead->is_expensive()) { 1369 igvn->C->remove_expensive_node(dead); 1370 } 1371 CastIINode* cast = dead->isa_CastII(); 1372 if (cast != NULL && cast->has_range_check()) { 1373 igvn->C->remove_range_check_cast(cast); 1374 } 1375 if (dead->Opcode() == Op_Opaque4) { 1376 igvn->C->remove_range_check_cast(dead); 1377 } 1378 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); 1379 bs->unregister_potential_barrier_node(dead); 1380 igvn->C->record_dead_node(dead->_idx); 1381 // Kill all inputs to the dead guy 1382 for (uint i=0; i < dead->req(); i++) { 1383 Node *n = dead->in(i); // Get input to dead guy 1384 if (n != NULL && !n->is_top()) { // Input is valid? 1385 dead->set_req(i, top); // Smash input away 1386 if (n->outcnt() == 0) { // Input also goes dead? 1387 if (!n->is_Con()) 1388 nstack.push(n); // Clear it out as well 1389 } else if (n->outcnt() == 1 && 1390 n->has_special_unique_user()) { 1391 igvn->add_users_to_worklist( n ); 1392 } else if (n->outcnt() <= 2 && n->is_Store()) { 1393 // Push store's uses on worklist to enable folding optimization for 1394 // store/store and store/load to the same address. 1395 // The restriction (outcnt() <= 2) is the same as in set_req_X() 1396 // and remove_globally_dead_node(). 1397 igvn->add_users_to_worklist( n ); 1398 } else { 1399 BarrierSet::barrier_set()->barrier_set_c2()->enqueue_useful_gc_barrier(igvn, n); 1400 } 1401 } 1402 } 1403 } // (dead->outcnt() == 0) 1404 } // while (nstack.size() > 0) for outputs 1405 return; 1406 } 1407 1408 //------------------------------remove_dead_region----------------------------- 1409 bool Node::remove_dead_region(PhaseGVN *phase, bool can_reshape) { 1410 Node *n = in(0); 1411 if( !n ) return false; 1412 // Lost control into this guy? 
I.e., it became unreachable? 1413 // Aggressively kill all unreachable code. 1414 if (can_reshape && n->is_top()) { 1415 kill_dead_code(this, phase->is_IterGVN()); 1416 return false; // Node is dead. 1417 } 1418 1419 if( n->is_Region() && n->as_Region()->is_copy() ) { 1420 Node *m = n->nonnull_req(); 1421 set_req(0, m); 1422 return true; 1423 } 1424 return false; 1425 } 1426 1427 //------------------------------hash------------------------------------------- 1428 // Hash function over Nodes. 1429 uint Node::hash() const { 1430 uint sum = 0; 1431 for( uint i=0; i<_cnt; i++ ) // Add in all inputs 1432 sum = (sum<<1)-(uintptr_t)in(i); // Ignore embedded NULLs 1433 return (sum>>2) + _cnt + Opcode(); 1434 } 1435 1436 //------------------------------cmp-------------------------------------------- 1437 // Compare special parts of simple Nodes 1438 uint Node::cmp( const Node &n ) const { 1439 return 1; // Must be same 1440 } 1441 1442 //------------------------------rematerialize----------------------------------- 1443 // Should we clone rather than spill this instruction? 1444 bool Node::rematerialize() const { 1445 if ( is_Mach() ) 1446 return this->as_Mach()->rematerialize(); 1447 else 1448 return (_flags & Flag_rematerialize) != 0; 1449 } 1450 1451 //------------------------------needs_anti_dependence_check--------------------- 1452 // Nodes which use memory without consuming it, hence need antidependences. 1453 bool Node::needs_anti_dependence_check() const { 1454 if( req() < 2 || (_flags & Flag_needs_anti_dependence_check) == 0 ) 1455 return false; 1456 else 1457 return in(1)->bottom_type()->has_memory(); 1458 } 1459 1460 1461 // Get an integer constant from a ConNode (or CastIINode). 1462 // Return a default value if there is no apparent constant here. 
1463 const TypeInt* Node::find_int_type() const { 1464 if (this->is_Type()) { 1465 return this->as_Type()->type()->isa_int(); 1466 } else if (this->is_Con()) { 1467 assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode"); 1468 return this->bottom_type()->isa_int(); 1469 } 1470 return NULL; 1471 } 1472 1473 // Get a pointer constant from a ConstNode. 1474 // Returns the constant if it is a pointer ConstNode 1475 intptr_t Node::get_ptr() const { 1476 assert( Opcode() == Op_ConP, "" ); 1477 return ((ConPNode*)this)->type()->is_ptr()->get_con(); 1478 } 1479 1480 // Get a narrow oop constant from a ConNNode. 1481 intptr_t Node::get_narrowcon() const { 1482 assert( Opcode() == Op_ConN, "" ); 1483 return ((ConNNode*)this)->type()->is_narrowoop()->get_con(); 1484 } 1485 1486 // Get a long constant from a ConNode. 1487 // Return a default value if there is no apparent constant here. 1488 const TypeLong* Node::find_long_type() const { 1489 if (this->is_Type()) { 1490 return this->as_Type()->type()->isa_long(); 1491 } else if (this->is_Con()) { 1492 assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode"); 1493 return this->bottom_type()->isa_long(); 1494 } 1495 return NULL; 1496 } 1497 1498 1499 /** 1500 * Return a ptr type for nodes which should have it. 1501 */ 1502 const TypePtr* Node::get_ptr_type() const { 1503 const TypePtr* tp = this->bottom_type()->make_ptr(); 1504 #ifdef ASSERT 1505 if (tp == NULL) { 1506 this->dump(1); 1507 assert((tp != NULL), "unexpected node type"); 1508 } 1509 #endif 1510 return tp; 1511 } 1512 1513 // Get a double constant from a ConstNode. 1514 // Returns the constant if it is a double ConstNode 1515 jdouble Node::getd() const { 1516 assert( Opcode() == Op_ConD, "" ); 1517 return ((ConDNode*)this)->type()->is_double_constant()->getd(); 1518 } 1519 1520 // Get a float constant from a ConstNode. 
// Returns the constant if it is a float ConstNode
jfloat Node::getf() const {
  assert( Opcode() == Op_ConF, "" );
  return ((ConFNode*)this)->type()->is_float_constant()->getf();
}

#ifndef PRODUCT

//------------------------------find------------------------------------------
// Find a neighbor of this Node with the given _idx
// If idx is negative, find its absolute value, following both _in and _out.
// Depth-first search; visited sets are kept per-arena (old vs. new space).
static void find_recur(Compile* C, Node* &result, Node *n, int idx, bool only_ctrl,
                       VectorSet* old_space, VectorSet* new_space ) {
  int node_idx = (idx >= 0) ? idx : -idx;
  if (NotANode(n)) return;  // Gracefully handle NULL, -1, 0xabababab, etc.
  // Contained in new_space or old_space? Check old_arena first since it's mostly empty.
  VectorSet *v = C->old_arena()->contains(n) ? old_space : new_space;
  if( v->test(n->_idx) ) return;
  if( (int)n->_idx == node_idx
      debug_only(|| n->debug_idx() == node_idx) ) {
    // Report ambiguity if two distinct nodes carry the requested index.
    if (result != NULL)
      tty->print("find: " INTPTR_FORMAT " and " INTPTR_FORMAT " both have idx==%d\n",
                 (uintptr_t)result, (uintptr_t)n, node_idx);
    result = n;
  }
  v->set(n->_idx);
  for( uint i=0; i<n->len(); i++ ) {
    if( only_ctrl && !(n->is_Region()) && (n->Opcode() != Op_Root) && (i != TypeFunc::Control) ) continue;
    find_recur(C, result, n->in(i), idx, only_ctrl, old_space, new_space );
  }
  // Search along forward edges also:
  if (idx < 0 && !only_ctrl) {
    for( uint j=0; j<n->outcnt(); j++ ) {
      find_recur(C, result, n->raw_out(j), idx, only_ctrl, old_space, new_space );
    }
  }
#ifdef ASSERT
  // Search along debug_orig edges last, checking for cycles
  Node* orig = n->debug_orig();
  if (orig != NULL) {
    do {
      if (NotANode(orig)) break;
      find_recur(C, result, orig, idx, only_ctrl, old_space, new_space );
      orig = orig->debug_orig();
    } while (orig != NULL && orig != n->debug_orig());
  }
#endif //ASSERT
}

// call this from debugger:
Node* find_node(Node* n, int idx) {
  return n->find(idx);
}

//------------------------------find-------------------------------------------
// Find a node with the given _idx reachable from this node (both directions
// when idx is negative); see find_recur above.
Node* Node::find(int idx) const {
  ResourceArea *area = Thread::current()->resource_area();
  VectorSet old_space(area), new_space(area);
  Node* result = NULL;
  find_recur(Compile::current(), result, (Node*) this, idx, false, &old_space, &new_space );
  return result;
}

//------------------------------find_ctrl--------------------------------------
// Find an ancestor to this node in the control history with given _idx
Node* Node::find_ctrl(int idx) const {
  ResourceArea *area = Thread::current()->resource_area();
  VectorSet old_space(area), new_space(area);
  Node* result = NULL;
  find_recur(Compile::current(), result, (Node*) this, idx, true, &old_space, &new_space );
  return result;
}
#endif



#ifndef PRODUCT

// -----------------------------Name-------------------------------------------
extern const char *NodeClassNames[];
const char *Node::Name() const { return NodeClassNames[Opcode()]; }

// A node is "disconnected" when every required input is NULL.
static bool is_disconnected(const Node* n) {
  for (uint i = 0; i < n->req(); i++) {
    if (n->in(i) != NULL) return false;
  }
  return true;
}

#ifdef ASSERT
// Print the debug_orig chain of 'orig' onto 'st'; disconnected nodes are
// printed in brackets, old-arena nodes prefixed with 'o'.
static void dump_orig(Node* orig, outputStream *st) {
  Compile* C = Compile::current();
  if (NotANode(orig)) orig = NULL;
  if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
  if (orig == NULL) return;
  st->print(" !orig=");
  Node* fast = orig->debug_orig(); // tortoise & hare algorithm to detect loops
  if (NotANode(fast)) fast = NULL;
  while (orig != NULL) {
    bool discon = is_disconnected(orig);  // if discon, print [123] else 123
    if (discon) st->print("[");
    if (!Compile::current()->node_arena()->contains(orig))
      st->print("o");
    st->print("%d", orig->_idx);
    if (discon) st->print("]");
    orig = orig->debug_orig();
    if (NotANode(orig)) orig = NULL;
    if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
    if (orig != NULL) st->print(",");
    if (fast != NULL) {
      // Step fast twice for each single step of orig:
      fast = fast->debug_orig();
      if (NotANode(fast)) fast = NULL;
      if (fast != NULL && fast != orig) {
        fast = fast->debug_orig();
        if (NotANode(fast)) fast = NULL;
      }
      if (fast == orig) {
        st->print("...");
        break;
      }
    }
  }
}

// Record the node this one was cloned from; trigger a breakpoint if the
// BreakAtNode flag matches any index on the (bounded) orig chain.
void Node::set_debug_orig(Node* orig) {
  _debug_orig = orig;
  if (BreakAtNode == 0) return;
  if (NotANode(orig)) orig = NULL;
  int trip = 10;
  while (orig != NULL) {
    if (orig->debug_idx() == BreakAtNode || (int)orig->_idx == BreakAtNode) {
      tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d orig._idx=%d orig._debug_idx=%d",
                    this->_idx, this->debug_idx(), orig->_idx, orig->debug_idx());
      BREAKPOINT;
    }
    orig = orig->debug_orig();
    if (NotANode(orig)) orig = NULL;
    if (trip-- <= 0) break;
  }
}
#endif //ASSERT

//------------------------------dump------------------------------------------
// Dump a Node
void Node::dump(const char* suffix, bool mark, outputStream *st) const {
  Compile* C = Compile::current();
  bool is_new = C->node_arena()->contains(this);
  C->_in_dump_cnt++;
  st->print("%c%d%s\t%s\t=== ", is_new ? ' ' : 'o', _idx, mark ? " >" : "", Name());

  // Dump the required and precedence inputs
  dump_req(st);
  dump_prec(st);
  // Dump the outputs
  dump_out(st);

  if (is_disconnected(this)) {
#ifdef ASSERT
    st->print(" [%d]",debug_idx());
    dump_orig(debug_orig(), st);
#endif
    st->cr();
    C->_in_dump_cnt--;
    return;                     // don't process dead nodes
  }

  if (C->clone_map().value(_idx) != 0) {
    C->clone_map().dump(_idx);
  }
  // Dump node-specific info
  dump_spec(st);
#ifdef ASSERT
  // Dump the non-reset _debug_idx
  if (Verbose && WizardMode) {
    st->print(" [%d]",debug_idx());
  }
#endif

  const Type *t = bottom_type();

  if (t != NULL && (t->isa_instptr() || t->isa_klassptr())) {
    const TypeInstPtr  *toop = t->isa_instptr();
    const TypeKlassPtr *tkls = t->isa_klassptr();
    ciKlass* klass = toop ? toop->klass() : (tkls ? tkls->klass() : NULL );
    if (klass && klass->is_loaded() && klass->is_interface()) {
      st->print(" Interface:");
    } else if (toop) {
      st->print(" Oop:");
    } else if (tkls) {
      st->print(" Klass:");
    }
    t->dump_on(st);
  } else if (t == Type::MEMORY) {
    st->print(" Memory:");
    MemNode::dump_adr_type(this, adr_type(), st);
  } else if (Verbose || WizardMode) {
    st->print(" Type:");
    if (t) {
      t->dump_on(st);
    } else {
      st->print("no type");
    }
  } else if (t->isa_vect() && this->is_MachSpillCopy()) {
    // Dump MachSpillcopy vector type.
    t->dump_on(st);
  }
  if (is_new) {
    debug_only(dump_orig(debug_orig(), st));
    Node_Notes* nn = C->node_notes_at(_idx);
    if (nn != NULL && !nn->is_clear()) {
      if (nn->jvms() != NULL) {
        st->print(" !jvms:");
        nn->jvms()->dump_spec(st);
      }
    }
  }
  if (suffix) st->print("%s", suffix);
  C->_in_dump_cnt--;
}

//------------------------------dump_req--------------------------------------
// Print each required input: '_' for NULL, 'o'-prefixed for old-arena nodes.
void Node::dump_req(outputStream *st) const {
  // Dump the required input edges
  for (uint i = 0; i < req(); i++) {    // For all required inputs
    Node* d = in(i);
    if (d == NULL) {
      st->print("_ ");
    } else if (NotANode(d)) {
      st->print("NotANode ");  // uninitialized, sentinel, garbage, etc.
    } else {
      st->print("%c%d ", Compile::current()->node_arena()->contains(d) ? ' ' : 'o', d->_idx);
    }
  }
}


//------------------------------dump_prec-------------------------------------
// Print the precedence inputs, preceded by " |" if any exist.
void Node::dump_prec(outputStream *st) const {
  // Dump the precedence edges
  int any_prec = 0;
  for (uint i = req(); i < len(); i++) {       // For all precedence inputs
    Node* p = in(i);
    if (p != NULL) {
      if (!any_prec++) st->print(" |");
      if (NotANode(p)) { st->print("NotANode "); continue; }
      st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    }
  }
}

//------------------------------dump_out--------------------------------------
// Print the output (use) edges, delimited by "[[" and "]]".
void Node::dump_out(outputStream *st) const {
  // Delimit the output edges
  st->print(" [[");
  // Dump the output edges
  for (uint i = 0; i < _outcnt; i++) {    // For all outputs
    Node* u = _out[i];
    if (u == NULL) {
      st->print("_ ");
    } else if (NotANode(u)) {
      st->print("NotANode ");
    } else {
      st->print("%c%d ", Compile::current()->node_arena()->contains(u) ? ' ' : 'o', u->_idx);
    }
  }
  st->print("]] ");
}

//----------------------------collect_nodes_i----------------------------------
// Collects nodes from an Ideal graph, starting from a given start node and
// moving in a given direction until a certain depth (distance from the start
// node) is reached. Duplicates are ignored.
// Arguments:
//   nstack:        the nodes are collected into this array.
//   start:         the node at which to start collecting.
//   direction:     if this is a positive number, collect input nodes; if it is
//                  a negative number, collect output nodes.
//   depth:         collect nodes up to this distance from the start node.
//   include_start: whether to include the start node in the result collection.
//   only_ctrl:     whether to regard control edges only during traversal.
//   only_data:     whether to regard data edges only during traversal.
static void collect_nodes_i(GrowableArray<Node*> *nstack, const Node* start, int direction, uint depth, bool include_start, bool only_ctrl, bool only_data) {
  Node* s = (Node*) start; // remove const
  nstack->append(s);
  int begin = 0;
  int end = 0;
  for(uint i = 0; i < depth; i++) {
    end = nstack->length();
    for(int j = begin; j < end; j++) {
      Node* tp = nstack->at(j);
      uint limit = direction > 0 ? tp->len() : tp->outcnt();
      for(uint k = 0; k < limit; k++) {
        Node* n = direction > 0 ?
tp->in(k) : tp->raw_out(k); 1815 1816 if (NotANode(n)) continue; 1817 // do not recurse through top or the root (would reach unrelated stuff) 1818 if (n->is_Root() || n->is_top()) continue; 1819 if (only_ctrl && !n->is_CFG()) continue; 1820 if (only_data && n->is_CFG()) continue; 1821 1822 bool on_stack = nstack->contains(n); 1823 if (!on_stack) { 1824 nstack->append(n); 1825 } 1826 } 1827 } 1828 begin = end; 1829 } 1830 if (!include_start) { 1831 nstack->remove(s); 1832 } 1833 } 1834 1835 //------------------------------dump_nodes------------------------------------- 1836 static void dump_nodes(const Node* start, int d, bool only_ctrl) { 1837 if (NotANode(start)) return; 1838 1839 GrowableArray <Node *> nstack(Compile::current()->live_nodes()); 1840 collect_nodes_i(&nstack, start, d, (uint) ABS(d), true, only_ctrl, false); 1841 1842 int end = nstack.length(); 1843 if (d > 0) { 1844 for(int j = end-1; j >= 0; j--) { 1845 nstack.at(j)->dump(); 1846 } 1847 } else { 1848 for(int j = 0; j < end; j++) { 1849 nstack.at(j)->dump(); 1850 } 1851 } 1852 } 1853 1854 //------------------------------dump------------------------------------------- 1855 void Node::dump(int d) const { 1856 dump_nodes(this, d, false); 1857 } 1858 1859 //------------------------------dump_ctrl-------------------------------------- 1860 // Dump a Node's control history to depth 1861 void Node::dump_ctrl(int d) const { 1862 dump_nodes(this, d, true); 1863 } 1864 1865 //-----------------------------dump_compact------------------------------------ 1866 void Node::dump_comp() const { 1867 this->dump_comp("\n"); 1868 } 1869 1870 //-----------------------------dump_compact------------------------------------ 1871 // Dump a Node in compact representation, i.e., just print its name and index. 1872 // Nodes can specify additional specifics to print in compact representation by 1873 // implementing dump_compact_spec. 
1874 void Node::dump_comp(const char* suffix, outputStream *st) const { 1875 Compile* C = Compile::current(); 1876 C->_in_dump_cnt++; 1877 st->print("%s(%d)", Name(), _idx); 1878 this->dump_compact_spec(st); 1879 if (suffix) { 1880 st->print("%s", suffix); 1881 } 1882 C->_in_dump_cnt--; 1883 } 1884 1885 //----------------------------dump_related------------------------------------- 1886 // Dump a Node's related nodes - the notion of "related" depends on the Node at 1887 // hand and is determined by the implementation of the virtual method rel. 1888 void Node::dump_related() const { 1889 Compile* C = Compile::current(); 1890 GrowableArray <Node *> in_rel(C->unique()); 1891 GrowableArray <Node *> out_rel(C->unique()); 1892 this->related(&in_rel, &out_rel, false); 1893 for (int i = in_rel.length() - 1; i >= 0; i--) { 1894 in_rel.at(i)->dump(); 1895 } 1896 this->dump("\n", true); 1897 for (int i = 0; i < out_rel.length(); i++) { 1898 out_rel.at(i)->dump(); 1899 } 1900 } 1901 1902 //----------------------------dump_related------------------------------------- 1903 // Dump a Node's related nodes up to a given depth (distance from the start 1904 // node). 1905 // Arguments: 1906 // d_in: depth for input nodes. 1907 // d_out: depth for output nodes (note: this also is a positive number). 
1908 void Node::dump_related(uint d_in, uint d_out) const { 1909 Compile* C = Compile::current(); 1910 GrowableArray <Node *> in_rel(C->unique()); 1911 GrowableArray <Node *> out_rel(C->unique()); 1912 1913 // call collect_nodes_i directly 1914 collect_nodes_i(&in_rel, this, 1, d_in, false, false, false); 1915 collect_nodes_i(&out_rel, this, -1, d_out, false, false, false); 1916 1917 for (int i = in_rel.length() - 1; i >= 0; i--) { 1918 in_rel.at(i)->dump(); 1919 } 1920 this->dump("\n", true); 1921 for (int i = 0; i < out_rel.length(); i++) { 1922 out_rel.at(i)->dump(); 1923 } 1924 } 1925 1926 //------------------------dump_related_compact--------------------------------- 1927 // Dump a Node's related nodes in compact representation. The notion of 1928 // "related" depends on the Node at hand and is determined by the implementation 1929 // of the virtual method rel. 1930 void Node::dump_related_compact() const { 1931 Compile* C = Compile::current(); 1932 GrowableArray <Node *> in_rel(C->unique()); 1933 GrowableArray <Node *> out_rel(C->unique()); 1934 this->related(&in_rel, &out_rel, true); 1935 int n_in = in_rel.length(); 1936 int n_out = out_rel.length(); 1937 1938 this->dump_comp(n_in == 0 ? "\n" : " "); 1939 for (int i = 0; i < n_in; i++) { 1940 in_rel.at(i)->dump_comp(i == n_in - 1 ? "\n" : " "); 1941 } 1942 for (int i = 0; i < n_out; i++) { 1943 out_rel.at(i)->dump_comp(i == n_out - 1 ? "\n" : " "); 1944 } 1945 } 1946 1947 //------------------------------related---------------------------------------- 1948 // Collect a Node's related nodes. The default behaviour just collects the 1949 // inputs and outputs at depth 1, including both control and data flow edges, 1950 // regardless of whether the presentation is compact or not. For data nodes, 1951 // the default is to collect all data inputs (till level 1 if compact), and 1952 // outputs till level 1. 
void Node::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const {
  if (this->is_CFG()) {
    // Control nodes: collect control inputs and outputs at depth 1.
    collect_nodes_i(in_rel, this, 1, 1, false, false, false);
    collect_nodes_i(out_rel, this, -1, 1, false, false, false);
  } else {
    if (compact) {
      // Compact presentation: data inputs only, one level deep.
      this->collect_nodes(in_rel, 1, false, true);
    } else {
      // Full presentation: the entire data input graph.
      this->collect_nodes_in_all_data(in_rel, false);
    }
    this->collect_nodes(out_rel, -1, false, false);
  }
}

//---------------------------collect_nodes-------------------------------------
// An entry point to the low-level node collection facility, to start from a
// given node in the graph. The start node is by default not included in the
// result.
// Arguments:
//   ns:   collect the nodes into this data structure.
//   d:    the depth (distance from start node) to which nodes should be
//         collected. A value >0 indicates input nodes, a value <0, output
//         nodes.
//   ctrl: include only control nodes.
//   data: include only data nodes.
void Node::collect_nodes(GrowableArray<Node*> *ns, int d, bool ctrl, bool data) const {
  if (ctrl && data) {
    // ignore nonsensical combination
    return;
  }
  collect_nodes_i(ns, this, d, (uint) ABS(d), false, ctrl, data);
}

//--------------------------collect_nodes_in-----------------------------------
// Shared helper for collect_nodes_in_all_data/ctrl below: collects the entire
// primary (data or control) input graph of start into ns, optionally followed
// by the secondary boundary nodes one level beyond it.
static void collect_nodes_in(Node* start, GrowableArray<Node*> *ns, bool primary_is_data, bool collect_secondary) {
  // The maximum depth is determined using a BFS that visits all primary (data
  // or control) inputs and increments the depth at each level.
  uint d_in = 0;
  GrowableArray<Node*> nodes(Compile::current()->unique());
  nodes.push(start);
  // nodes doubles as the BFS queue; n_idx is the read cursor into it.
  int nodes_at_current_level = 1;
  int n_idx = 0;
  while (nodes_at_current_level > 0) {
    // Add all primary inputs reachable from the current level to the list, and
    // increase the depth if there were any.
    int nodes_at_next_level = 0;
    bool nodes_added = false;
    while (nodes_at_current_level > 0) {
      nodes_at_current_level--;
      Node* current = nodes.at(n_idx++);
      for (uint i = 0; i < current->len(); i++) {
        Node* n = current->in(i);
        if (NotANode(n)) {
          continue;
        }
        // Skip inputs of the wrong kind (data vs. control).
        if ((primary_is_data && n->is_CFG()) || (!primary_is_data && !n->is_CFG())) {
          continue;
        }
        if (!nodes.contains(n)) {
          nodes.push(n);
          nodes_added = true;
          nodes_at_next_level++;
        }
      }
    }
    if (nodes_added) {
      d_in++;
    }
    nodes_at_current_level = nodes_at_next_level;
  }
  // d_in is now the depth of the whole primary input graph; collect it.
  start->collect_nodes(ns, d_in, !primary_is_data, primary_is_data);
  if (collect_secondary) {
    // Now, iterate over the secondary nodes in ns and add the respective
    // boundary reachable from them.
    GrowableArray<Node*> sns(Compile::current()->unique());
    for (GrowableArrayIterator<Node*> it = ns->begin(); it != ns->end(); ++it) {
      Node* n = *it;
      n->collect_nodes(&sns, 1, primary_is_data, !primary_is_data);
      for (GrowableArrayIterator<Node*> d = sns.begin(); d != sns.end(); ++d) {
        ns->append_if_missing(*d);
      }
      sns.clear();
    }
  }
}

//---------------------collect_nodes_in_all_data-------------------------------
// Collect the entire data input graph. Include the control boundary if
// requested.
// Arguments:
//   ns:   collect the nodes into this data structure.
//   ctrl: if true, include the control boundary.
2045 void Node::collect_nodes_in_all_data(GrowableArray<Node*> *ns, bool ctrl) const { 2046 collect_nodes_in((Node*) this, ns, true, ctrl); 2047 } 2048 2049 //--------------------------collect_nodes_in_all_ctrl-------------------------- 2050 // Collect the entire control input graph. Include the data boundary if 2051 // requested. 2052 // ns: collect the nodes into this data structure. 2053 // data: if true, include the control boundary. 2054 void Node::collect_nodes_in_all_ctrl(GrowableArray<Node*> *ns, bool data) const { 2055 collect_nodes_in((Node*) this, ns, false, data); 2056 } 2057 2058 //------------------collect_nodes_out_all_ctrl_boundary------------------------ 2059 // Collect the entire output graph until hitting control node boundaries, and 2060 // include those. 2061 void Node::collect_nodes_out_all_ctrl_boundary(GrowableArray<Node*> *ns) const { 2062 // Perform a BFS and stop at control nodes. 2063 GrowableArray<Node*> nodes(Compile::current()->unique()); 2064 nodes.push((Node*) this); 2065 while (nodes.length() > 0) { 2066 Node* current = nodes.pop(); 2067 if (NotANode(current)) { 2068 continue; 2069 } 2070 ns->append_if_missing(current); 2071 if (!current->is_CFG()) { 2072 for (DUIterator i = current->outs(); current->has_out(i); i++) { 2073 nodes.push(current->out(i)); 2074 } 2075 } 2076 } 2077 ns->remove((Node*) this); 2078 } 2079 2080 // VERIFICATION CODE 2081 // For each input edge to a node (ie - for each Use-Def edge), verify that 2082 // there is a corresponding Def-Use edge. 
//------------------------------verify_edges-----------------------------------
// Verify that every Use-Def edge of this node has a matching Def-Use edge
// (and no duplicates), then recurse over the whole input subgraph. visited
// guards against revisiting shared subgraphs.
void Node::verify_edges(Unique_Node_List &visited) {
  uint i, j, idx;
  int cnt;
  Node *n;

  // Recursive termination test
  if (visited.member(this)) return;
  visited.push(this);

  // Walk over all input edges, checking for correspondence
  for( i = 0; i < len(); i++ ) {
    n = in(i);
    if (n != NULL && !n->is_top()) {
      // Count instances of (Node *)this
      cnt = 0;
      for (idx = 0; idx < n->_outcnt; idx++ ) {
        if (n->_out[idx] == (Node *)this) cnt++;
      }
      assert( cnt > 0,"Failed to find Def-Use edge." );
      // Check for duplicate edges
      // walk the input array downcounting the input edges to n
      for( j = 0; j < len(); j++ ) {
        if( in(j) == n ) cnt--;
      }
      assert( cnt == 0,"Mismatched edge count.");
    } else if (n == NULL) {
      assert(i >= req() || i == 0 || is_Region() || is_Phi(), "only regions or phis have null data edges");
    } else {
      assert(n->is_top(), "sanity");
      // Nothing to check.
    }
  }
  // Recursive walk over all input edges
  for( i = 0; i < len(); i++ ) {
    n = in(i);
    if( n != NULL )
      in(i)->verify_edges(visited);
  }
}

//------------------------------verify_recur-----------------------------------
static const Node *unique_top = NULL;

// Recursively verify def-use consistency over the input graph of n.
// Arguments:
//   verify_depth: recursion limit; a negative value means unlimited.
//   old_space, new_space: per-arena visited sets (node indices are only
//     unique within one arena, hence two sets).
void Node::verify_recur(const Node *n, int verify_depth,
                        VectorSet &old_space, VectorSet &new_space) {
  if ( verify_depth == 0 ) return;
  if (verify_depth > 0) --verify_depth;

  Compile* C = Compile::current();

  // Contained in new_space or old_space?
  VectorSet *v = C->node_arena()->contains(n) ? &new_space : &old_space;
  // Check for visited in the proper space. Numberings are not unique
  // across spaces so we need a separate VectorSet for each space.
  if( v->test_set(n->_idx) ) return;

  // While we are here, cache and check uniqueness of the TOP node.
  if (n->is_Con() && n->bottom_type() == Type::TOP) {
    if (C->cached_top_node() == NULL)
      C->set_cached_top_node((Node*)n);
    assert(C->cached_top_node() == n, "TOP node must be unique");
  }

  for( uint i = 0; i < n->len(); i++ ) {
    Node *x = n->in(i);
    if (!x || x->is_top()) continue;

    // Verify my input has a def-use edge to me
    if (true /*VerifyDefUse*/) {
      // Count use-def edges from n to x
      int cnt = 0;
      for( uint j = 0; j < n->len(); j++ )
        if( n->in(j) == x )
          cnt++;
      // Count def-use edges from x to n
      uint max = x->_outcnt;
      for( uint k = 0; k < max; k++ )
        if (x->_out[k] == n)
          cnt--;
      assert( cnt == 0, "mismatched def-use edge counts" );
    }

    verify_recur(x, verify_depth, old_space, new_space);
  }

}

//------------------------------verify-----------------------------------------
// Check Def-Use info for my subgraph
void Node::verify() const {
  Compile* C = Compile::current();
  Node* old_top = C->cached_top_node();
  ResourceMark rm;
  ResourceArea *area = Thread::current()->resource_area();
  VectorSet old_space(area), new_space(area);
  // verify_recur caches the TOP node as a side effect; restore the previous
  // value afterwards so verification leaves the compile state untouched.
  verify_recur(this, -1, old_space, new_space);
  C->set_cached_top_node(old_top);
}
#endif


//------------------------------walk-------------------------------------------
// Graph walk, with both pre-order and post-order functions
void Node::walk(NFunc pre, NFunc post, void *env) {
  VectorSet visited(Thread::current()->resource_area()); // Setup for local walk
  walk_(pre, post, env, visited);
}

// Depth-first walk over the input graph: pre runs before a node's inputs are
// walked, post after all of them have been.
void Node::walk_(NFunc pre, NFunc post, void *env, VectorSet &visited) {
  if( visited.test_set(_idx) ) return;
  pre(*this,env);               // Call the pre-order walk function
  for( uint i=0; i<_max; i++ )
    if( in(i) )                 // Input exists and is not walked?
      in(i)->walk_(pre,post,env,visited); // Walk it with pre & post functions
  post(*this,env);              // Call the post-order walk function
}

// A do-nothing walk function, usable as either pre or post above.
void Node::nop(Node &, void*) {}

//------------------------------Registers--------------------------------------
// Do we Match on this edge index or not? Generally false for Control
// and true for everything else. Weird for calls & returns.
uint Node::match_edge(uint idx) const {
  return idx;                   // True for other than index 0 (control)
}

static RegMask _not_used_at_all;
// Register classes are defined for specific machines
const RegMask &Node::out_RegMask() const {
  ShouldNotCallThis();
  return _not_used_at_all;
}

const RegMask &Node::in_RegMask(uint) const {
  ShouldNotCallThis();
  return _not_used_at_all;
}

//=============================================================================
//-----------------------------------------------------------------------------
// Release this array's storage back to its arena and start over, empty, in
// new_arena.
void Node_Array::reset( Arena *new_arena ) {
  _a->Afree(_nodes,_max*sizeof(Node*));
  _max = 0;
  _nodes = NULL;
  _a = new_arena;
}

//------------------------------clear------------------------------------------
// Clear all entries in _nodes to NULL but keep storage
void Node_Array::clear() {
  Copy::zero_to_bytes( _nodes, _max*sizeof(Node*) );
}

//-----------------------------------------------------------------------------
// Grow the backing store (doubling) until index i fits; new slots are zeroed.
void Node_Array::grow( uint i ) {
  if( !_max ) {
    // First use: start with a single zeroed slot.
    _max = 1;
    _nodes = (Node**)_a->Amalloc( _max * sizeof(Node*) );
    _nodes[0] = NULL;
  }
  uint old = _max;
  while( i >= _max ) _max <<= 1; // Double to fit
  _nodes = (Node**)_a->Arealloc( _nodes, old*sizeof(Node*),_max*sizeof(Node*));
  Copy::zero_to_bytes( &_nodes[old], (_max-old)*sizeof(Node*) );
}
//----------------------------------------------------------------------------- 2250 void Node_Array::insert( uint i, Node *n ) { 2251 if( _nodes[_max-1] ) grow(_max); // Get more space if full 2252 Copy::conjoint_words_to_higher((HeapWord*)&_nodes[i], (HeapWord*)&_nodes[i+1], ((_max-i-1)*sizeof(Node*))); 2253 _nodes[i] = n; 2254 } 2255 2256 //----------------------------------------------------------------------------- 2257 void Node_Array::remove( uint i ) { 2258 Copy::conjoint_words_to_lower((HeapWord*)&_nodes[i+1], (HeapWord*)&_nodes[i], ((_max-i-1)*sizeof(Node*))); 2259 _nodes[_max-1] = NULL; 2260 } 2261 2262 //----------------------------------------------------------------------------- 2263 void Node_Array::sort( C_sort_func_t func) { 2264 qsort( _nodes, _max, sizeof( Node* ), func ); 2265 } 2266 2267 //----------------------------------------------------------------------------- 2268 void Node_Array::dump() const { 2269 #ifndef PRODUCT 2270 for( uint i = 0; i < _max; i++ ) { 2271 Node *nn = _nodes[i]; 2272 if( nn != NULL ) { 2273 tty->print("%5d--> ",i); nn->dump(); 2274 } 2275 } 2276 #endif 2277 } 2278 2279 //--------------------------is_iteratively_computed------------------------------ 2280 // Operation appears to be iteratively computed (such as an induction variable) 2281 // It is possible for this operation to return false for a loop-varying 2282 // value, if it appears (by local graph inspection) to be computed by a simple conditional. 2283 bool Node::is_iteratively_computed() { 2284 if (ideal_reg()) { // does operation have a result register? 
2285 for (uint i = 1; i < req(); i++) { 2286 Node* n = in(i); 2287 if (n != NULL && n->is_Phi()) { 2288 for (uint j = 1; j < n->req(); j++) { 2289 if (n->in(j) == this) { 2290 return true; 2291 } 2292 } 2293 } 2294 } 2295 } 2296 return false; 2297 } 2298 2299 //--------------------------find_similar------------------------------ 2300 // Return a node with opcode "opc" and same inputs as "this" if one can 2301 // be found; Otherwise return NULL; 2302 Node* Node::find_similar(int opc) { 2303 if (req() >= 2) { 2304 Node* def = in(1); 2305 if (def && def->outcnt() >= 2) { 2306 for (DUIterator_Fast dmax, i = def->fast_outs(dmax); i < dmax; i++) { 2307 Node* use = def->fast_out(i); 2308 if (use != this && 2309 use->Opcode() == opc && 2310 use->req() == req()) { 2311 uint j; 2312 for (j = 0; j < use->req(); j++) { 2313 if (use->in(j) != in(j)) { 2314 break; 2315 } 2316 } 2317 if (j == use->req()) { 2318 return use; 2319 } 2320 } 2321 } 2322 } 2323 } 2324 return NULL; 2325 } 2326 2327 2328 //--------------------------unique_ctrl_out------------------------------ 2329 // Return the unique control out if only one. Null if none or more than one. 
Node* Node::unique_ctrl_out() const {
  Node* found = NULL;
  for (uint i = 0; i < outcnt(); i++) {
    Node* use = raw_out(i);
    if (use->is_CFG() && use != this) {
      if (found != NULL) return NULL; // more than one CFG use
      found = use;
    }
  }
  return found;
}

// Make sure c is a control dependency of this node: use it as the control
// input if there is none yet, otherwise add it as a precedence edge.
void Node::ensure_control_or_add_prec(Node* c) {
  if (in(0) == NULL) {
    set_req(0, c);
  } else if (in(0) != c) {
    add_prec(c);
  }
}

//=============================================================================
//------------------------------yank-------------------------------------------
// Find and remove
void Node_List::yank( Node *n ) {
  uint i;
  for( i = 0; i < _cnt; i++ )
    if( _nodes[i] == n )
      break;

  if( i < _cnt )
    _nodes[i] = _nodes[--_cnt]; // replace with last element (order not kept)
}

//------------------------------dump-------------------------------------------
void Node_List::dump() const {
#ifndef PRODUCT
  for( uint i = 0; i < _cnt; i++ )
    if( _nodes[i] ) {
      tty->print("%5d--> ",i);
      _nodes[i]->dump();
    }
#endif
}

// Print only the indices of the nodes in the list (or NULL for empty slots).
void Node_List::dump_simple() const {
#ifndef PRODUCT
  for( uint i = 0; i < _cnt; i++ )
    if( _nodes[i] ) {
      tty->print(" %d", _nodes[i]->_idx);
    } else {
      tty->print(" NULL");
    }
#endif
}

//=============================================================================
//------------------------------remove-----------------------------------------
// Remove a node from the worklist if present; the popped last element takes
// its slot, so the list order is not preserved.
void Unique_Node_List::remove( Node *n ) {
  if( _in_worklist[n->_idx] ) {
    for( uint i = 0; i < size(); i++ )
      if( _nodes[i] == n ) {
        map(i,Node_List::pop());  // move last element into the vacated slot
        _in_worklist >>= n->_idx; // clear the membership bit
        return;
      }
    ShouldNotReachHere();
  }
}

//-----------------------remove_useless_nodes----------------------------------
// Remove useless nodes from worklist
void Unique_Node_List::remove_useless_nodes(VectorSet &useful) {

  for( uint i = 0; i < size(); ++i ) {
    Node *n = at(i);
    assert( n != NULL, "Did not expect null entries in worklist");
    if( ! useful.test(n->_idx) ) {
      _in_worklist >>= n->_idx;
      map(i,Node_List::pop());
      // Node *replacement = Node_List::pop();
      // if( i != size() ) { // Check if removing last entry
      //   _nodes[i] = replacement;
      // }
      --i;  // Visit popped node
      // If it was last entry, loop terminates since size() was also reduced
    }
  }
}

//=============================================================================
// Double the stack's capacity, preserving the current contents and top.
void Node_Stack::grow() {
  size_t old_top = pointer_delta(_inode_top,_inodes,sizeof(INode)); // save _top
  size_t old_max = pointer_delta(_inode_max,_inodes,sizeof(INode));
  size_t max = old_max << 1;    // max * 2
  _inodes = REALLOC_ARENA_ARRAY(_a, INode, _inodes, old_max, max);
  _inode_max = _inodes + max;
  _inode_top = _inodes + old_top; // restore _top
}

// Node_Stack is used to map nodes.
2430 Node* Node_Stack::find(uint idx) const { 2431 uint sz = size(); 2432 for (uint i=0; i < sz; i++) { 2433 if (idx == index_at(i) ) 2434 return node_at(i); 2435 } 2436 return NULL; 2437 } 2438 2439 //============================================================================= 2440 uint TypeNode::size_of() const { return sizeof(*this); } 2441 #ifndef PRODUCT 2442 void TypeNode::dump_spec(outputStream *st) const { 2443 if( !Verbose && !WizardMode ) { 2444 // standard dump does this in Verbose and WizardMode 2445 st->print(" #"); _type->dump_on(st); 2446 } 2447 } 2448 2449 void TypeNode::dump_compact_spec(outputStream *st) const { 2450 st->print("#"); 2451 _type->dump_on(st); 2452 } 2453 #endif 2454 uint TypeNode::hash() const { 2455 return Node::hash() + _type->hash(); 2456 } 2457 uint TypeNode::cmp( const Node &n ) const 2458 { return !Type::cmp( _type, ((TypeNode&)n)._type ); } 2459 const Type *TypeNode::bottom_type() const { return _type; } 2460 const Type* TypeNode::Value(PhaseGVN* phase) const { return _type; } 2461 2462 //------------------------------ideal_reg-------------------------------------- 2463 uint TypeNode::ideal_reg() const { 2464 return _type->ideal_reg(); 2465 }