1 /* 2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "libadt/vectset.hpp" 27 #include "memory/allocation.inline.hpp" 28 #include "opto/cfgnode.hpp" 29 #include "opto/connode.hpp" 30 #include "opto/loopnode.hpp" 31 #include "opto/machnode.hpp" 32 #include "opto/matcher.hpp" 33 #include "opto/node.hpp" 34 #include "opto/opcodes.hpp" 35 #include "opto/regmask.hpp" 36 #include "opto/type.hpp" 37 #include "utilities/copy.hpp" 38 39 class RegMask; 40 // #include "phase.hpp" 41 class PhaseTransform; 42 class PhaseGVN; 43 44 // Arena we are currently building Nodes in 45 const uint Node::NotAMachineReg = 0xffff0000; 46 47 #ifndef PRODUCT 48 extern int nodes_created; 49 #endif 50 51 #ifdef ASSERT 52 53 //-------------------------- construct_node------------------------------------ 54 // Set a breakpoint here to identify where a particular node index is built. 
//------------------------------verify_construction-----------------------------
// Debug-build bookkeeping run on every Node construction: assigns a monotone
// global _debug_idx (whose low five decimal digits mirror _idx, so a node can
// be recognized across compilations), honors the BreakAtNode flag by stopping
// in the debugger, and clears the DU-iterator deletion-tracking fields.
void Node::verify_construction() {
  _debug_orig = NULL;
  int old_debug_idx = Compile::debug_idx();
  int new_debug_idx = old_debug_idx+1;
  if (new_debug_idx > 0) {
    // Arrange that the lowest five decimal digits of _debug_idx
    // will repeat those of _idx. In case this is somehow pathological,
    // we continue to assign negative numbers (!) consecutively.
    const int mod = 100000;
    int bump = (int)(_idx - new_debug_idx) % mod;
    if (bump < 0) bump += mod;
    assert(bump >= 0 && bump < mod, "");
    new_debug_idx += bump;
  }
  Compile::set_debug_idx(new_debug_idx);
  set_debug_idx( new_debug_idx );
  assert(Compile::current()->unique() < (UINT_MAX - 1), "Node limit exceeded UINT_MAX");
  // BreakAtNode matches either the per-compilation _idx or the global
  // _debug_idx, so it works whether or not compilation order is stable.
  if (BreakAtNode != 0 && (_debug_idx == BreakAtNode || (int)_idx == BreakAtNode)) {
    tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d", _idx, _debug_idx);
    BREAKPOINT;
  }
#if OPTO_DU_ITERATOR_ASSERT
  _last_del = NULL;   // no out-edge deleted from this node yet
  _del_tick = 0;      // deletion counter starts at zero
#endif
  _hash_lock = 0;
}


// #ifdef ASSERT ...

#if OPTO_DU_ITERATOR_ASSERT
// The DUIterator classes verify that a def-use (out-edge) iteration loop
// only performs deletions the iterator protocol allows.  Each iterator keeps
// a private copy of the node's _outcnt and _del_tick and cross-checks them
// against the node on every step.

//------------------------------sample------------------------------------------
// Capture the node's current out-edge state at iterator-creation time.
void DUIterator_Common::sample(const Node* node) {
  _vdui = VerifyDUIterators;
  _node = node;
  _outcnt = node->_outcnt;
  _del_tick = node->_del_tick;
  _last = NULL;
}

//------------------------------verify------------------------------------------
// Check that the iterator still refers to the same node and that no
// deletions happened behind the iterator's back.
void DUIterator_Common::verify(const Node* node, bool at_end_ok) {
  assert(_node == node, "consistent iterator source");
  assert(_del_tick == node->_del_tick, "no unexpected deletions allowed");
}

//------------------------------verify_resync-----------------------------------
// Accept a deletion performed by the loop body and resynchronize the
// iterator's cached counters with the node.
void DUIterator_Common::verify_resync() {
  // Ensure that the loop body has just deleted the last guy produced.
  const Node* node = _node;
  // Ensure that at least one copy of the last-seen edge was deleted.
  // Note: It is OK to delete multiple copies of the last-seen edge.
  // Unfortunately, we have no way to verify that all the deletions delete
  // that same edge. On this point we must use the Honor System.
  assert(node->_del_tick >= _del_tick+1, "must have deleted an edge");
  assert(node->_last_del == _last, "must have deleted the edge just produced");
  // We liked this deletion, so accept the resulting outcnt and tick.
  _outcnt = node->_outcnt;
  _del_tick = node->_del_tick;
}

//------------------------------reset-------------------------------------------
// Re-aim this iterator at the node 'that' iterates over (iterator assignment).
void DUIterator_Common::reset(const DUIterator_Common& that) {
  if (this == &that) return;  // ignore assignment to self
  if (!_vdui) {
    // We need to initialize everything, overwriting garbage values.
    _last = that._last;
    _vdui = that._vdui;
  }
  // Note: It is legal (though odd) for an iterator over some node x
  // to be reassigned to iterate over another node y. Some doubly-nested
  // progress loops depend on being able to do this.
  const Node* node = that._node;
  // Re-initialize everything, except _last.
  _node = node;
  _outcnt = node->_outcnt;
  _del_tick = node->_del_tick;
}

//------------------------------DUIterator::sample------------------------------
void DUIterator::sample(const Node* node) {
  DUIterator_Common::sample(node);      // Initialize the assertion data.
  _refresh_tick = 0;                    // No refreshes have happened, as yet.
}

//------------------------------DUIterator::verify------------------------------
// _idx must be a valid out-edge index (or one past the end if at_end_ok).
void DUIterator::verify(const Node* node, bool at_end_ok) {
  DUIterator_Common::verify(node, at_end_ok);
  assert(_idx < node->_outcnt + (uint)at_end_ok, "idx in range");
}

//------------------------------verify_increment--------------------------------
void DUIterator::verify_increment() {
  if (_refresh_tick & 1) {
    // We have refreshed the index during this loop.
    // Fix up _idx to meet asserts.
    if (_idx > _outcnt) _idx = _outcnt;
  }
  verify(_node, true);
}

//------------------------------verify_resync-----------------------------------
void DUIterator::verify_resync() {
  // Note: We do not assert on _outcnt, because insertions are OK here.
  DUIterator_Common::verify_resync();
  // Make sure we are still in sync, possibly with no more out-edges:
  verify(_node, true);
}

//------------------------------DUIterator::reset-------------------------------
// Iterator assignment; the source must be a fresh Node::outs() result.
void DUIterator::reset(const DUIterator& that) {
  if (this == &that) return;  // self assignment is always a no-op
  assert(that._refresh_tick == 0, "assign only the result of Node::outs()");
  assert(that._idx == 0, "assign only the result of Node::outs()");
  assert(_idx == that._idx, "already assigned _idx");
  if (!_vdui) {
    // We need to initialize everything, overwriting garbage values.
    sample(that._node);
  } else {
    DUIterator_Common::reset(that);
    if (_refresh_tick & 1) {
      _refresh_tick++;  // Clear the "was refreshed" flag.
    }
    // Bound the number of refresh/retry cycles so a non-converging
    // "while (progress)" loop is caught.
    assert(_refresh_tick < 2*100000, "DU iteration must converge quickly");
  }
}

//------------------------------refresh-----------------------------------------
// Re-sample the node's counters mid-loop; sets the low bit of _refresh_tick
// as a "was refreshed" marker that verify_finish() checks.
void DUIterator::refresh() {
  DUIterator_Common::sample(_node);  // Re-fetch assertion data.
  _refresh_tick |= 1;                // Set the "was refreshed" flag.
}

//------------------------------verify_finish-----------------------------------
void DUIterator::verify_finish() {
  // If the loop has killed the node, do not require it to re-run.
  if (_node->_outcnt == 0) _refresh_tick &= ~1;
  // If this assert triggers, it means that a loop used refresh_out_pos
  // to re-synch an iteration index, but the loop did not correctly
  // re-run itself, using a "while (progress)" construct.
  // This iterator enforces the rule that you must keep trying the loop
  // until it "runs clean" without any need for refreshing.
  assert(!(_refresh_tick & 1), "the loop must run once with no refreshing");
}


//------------------------------DUIterator_Fast::verify-------------------------
// Fast iterators walk a raw Node** (_outp); verify it stays inside the
// node's out array and that no insertions occurred.
void DUIterator_Fast::verify(const Node* node, bool at_end_ok) {
  DUIterator_Common::verify(node, at_end_ok);
  Node** out = node->_out;
  uint cnt = node->_outcnt;
  assert(cnt == _outcnt, "no insertions allowed");
  assert(_outp >= out && _outp <= out + cnt - !at_end_ok, "outp in range");
  // This last check is carefully designed to work for NO_OUT_ARRAY.
}

//------------------------------verify_limit------------------------------------
// The limit pointer (imax) must equal _out + _outcnt.
void DUIterator_Fast::verify_limit() {
  const Node* node = _node;
  verify(node, true);
  assert(_outp == node->_out + node->_outcnt, "limit still correct");
}

//------------------------------verify_resync-----------------------------------
// Resync either a limit pointer (imax) or an interior pointer (i) after the
// loop body deleted out-edges.
void DUIterator_Fast::verify_resync() {
  const Node* node = _node;
  if (_outp == node->_out + _outcnt) {
    // Note that the limit imax, not the pointer i, gets updated with the
    // exact count of deletions. (For the pointer it's always "--i".)
    assert(node->_outcnt+node->_del_tick == _outcnt+_del_tick, "no insertions allowed with deletion(s)");
    // This is a limit pointer, with a name like "imax".
    // Fudge the _last field so that the common assert will be happy.
    _last = (Node*) node->_last_del;
    DUIterator_Common::verify_resync();
  } else {
    assert(node->_outcnt < _outcnt, "no insertions allowed with deletion(s)");
    // A normal internal pointer.
    DUIterator_Common::verify_resync();
    // Make sure we are still in sync, possibly with no more out-edges:
    verify(node, true);
  }
}

//------------------------------verify_relimit----------------------------------
// Applied when the loop writes "imax -= n": exactly n edges must have been
// deleted since the last sync.
void DUIterator_Fast::verify_relimit(uint n) {
  const Node* node = _node;
  assert((int)n > 0, "use imax -= n only with a positive count");
  // This must be a limit pointer, with a name like "imax".
  assert(_outp == node->_out + node->_outcnt, "apply -= only to a limit (imax)");
  // The reported number of deletions must match what the node saw.
  assert(node->_del_tick == _del_tick + n, "must have deleted n edges");
  // Fudge the _last field so that the common assert will be happy.
  _last = (Node*) node->_last_del;
  DUIterator_Common::verify_resync();
}

//------------------------------DUIterator_Fast::reset--------------------------
void DUIterator_Fast::reset(const DUIterator_Fast& that) {
  assert(_outp == that._outp, "already assigned _outp");
  DUIterator_Common::reset(that);
}

//------------------------------DUIterator_Last::verify-------------------------
// Last-to-first iteration: _outp must always point at the last out-edge.
void DUIterator_Last::verify(const Node* node, bool at_end_ok) {
  // at_end_ok means the _outp is allowed to underflow by 1
  _outp += at_end_ok;
  DUIterator_Fast::verify(node, at_end_ok);  // check _del_tick, etc.
  _outp -= at_end_ok;
  assert(_outp == (node->_out + node->_outcnt) - 1, "pointer must point to end of nodes");
}

//------------------------------verify_limit------------------------------------
// For a Last iterator the limit is the *start* of the out array.
void DUIterator_Last::verify_limit() {
  // Do not require the limit address to be resynched.
  //verify(node, true);
  assert(_outp == _node->_out, "limit still correct");
}

//------------------------------verify_step-------------------------------------
// The loop body reported deleting num_edges edges; fold that into the cached
// counters and re-verify against the node.
void DUIterator_Last::verify_step(uint num_edges) {
  assert((int)num_edges > 0, "need non-zero edge count for loop progress");
  _outcnt -= num_edges;
  _del_tick += num_edges;
  // Make sure we are still in sync, possibly with no more out-edges:
  const Node* node = _node;
  verify(node, true);
  assert(node->_last_del == _last, "must have deleted the edge just produced");
}

#endif //OPTO_DU_ITERATOR_ASSERT


#endif //ASSERT


// This constant used to initialize _out may be any non-null value.
// The value NULL is reserved for the top node only.
#define NO_OUT_ARRAY ((Node**)-1)

// This funny expression handshakes with Node::operator new
// to pull Compile::current out of the new node's _out field,
// and then calls a subroutine which manages most field
// initializations. The only one which is tricky is the
// _idx field, which is const, and so must be initialized
// by a return value, not an assignment.
//
// (Aren't you thankful that Java finals don't require so many tricks?)
// IDX_INIT(req) runs inside the constructor's member-initializer list for the
// const _idx field.  Node::operator new (elsewhere) stashes the Compile* in
// the freshly allocated object's _out slot; this macro recovers it and calls
// Init, whose return value becomes _idx.
#define IDX_INIT(req) this->Init((req), (Compile*) this->_out)
#ifdef _MSC_VER // the IDX_INIT hack falls foul of warning C4355
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif

// Out-of-line code from node constructors.
// Executed only when extra debug info. is being passed around.
static void init_node_notes(Compile* C, int idx, Node_Notes* nn) {
  C->set_node_notes_at(idx, nn);
}

// Shared initialization code.
// Allocates the _in edge array, captures default node notes, and zeroes the
// remaining bookkeeping fields.  Returns the node's new unique index, which
// the caller's initializer list assigns to the const _idx member.
inline int Node::Init(int req, Compile* C) {
  assert(Compile::current() == C, "must use operator new(Compile*)");
  int idx = C->next_unique();

  // Allocate memory for the necessary number of edges.
  if (req > 0) {
    // Allocate space for _in array to have double alignment.
    _in = (Node **) ((char *) (C->node_arena()->Amalloc_D(req * sizeof(void*))));
#ifdef ASSERT
    _in[req-1] = this; // magic cookie for assertion check
#endif
  }
  // If there are default notes floating around, capture them:
  Node_Notes* nn = C->default_node_notes();
  if (nn != NULL) init_node_notes(C, idx, nn);

  // Note: At this point, C is dead,
  // and we begin to initialize the new Node.

  _cnt = _max = req;
  _outcnt = _outmax = 0;
  _class_id = Class_Node;
  _flags = 0;
  _out = NO_OUT_ARRAY;  // any non-NULL sentinel; NULL is reserved for top
  return idx;
}

//------------------------------Node-------------------------------------------
// Create a Node, with a given number of required edges.
// Node constructors.  Each fixed-arity constructor first checks (via the
// ASSERT-only magic cookie planted by Init) that 'new' was given the matching
// argument count, then installs each input and adds the reciprocal def-use
// (out) edge on the non-NULL inputs.
Node::Node(uint req)
  : _idx(IDX_INIT(req))
{
  assert( req < (uint)(MaxNodeLimit - NodeLimitFudgeFactor), "Input limit exceeded" );
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  if (req == 0) {
    // Zero-input node: the cookie check degenerates to _in == this because
    // Init allocated no edge array.
    assert( _in == (Node**)this, "Must not pass arg count to 'new'" );
    _in = NULL;
  } else {
    assert( _in[req-1] == this, "Must pass arg count to 'new'" );
    Node** to = _in;
    // All required inputs start out NULL; callers fill them in later.
    for(uint i = 0; i < req; i++) {
      to[i] = NULL;
    }
  }
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0)
  : _idx(IDX_INIT(1))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[0] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1)
  : _idx(IDX_INIT(2))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[1] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2)
  : _idx(IDX_INIT(3))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[2] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3)
  : _idx(IDX_INIT(4))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[3] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3, Node *n4)
  : _idx(IDX_INIT(5))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[4] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  assert( is_not_dead(n4), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
  _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
           Node *n4, Node *n5)
  : _idx(IDX_INIT(6))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[5] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  assert( is_not_dead(n4), "can not use dead node");
  assert( is_not_dead(n5), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
  _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
  _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
           Node *n4, Node *n5, Node *n6)
  : _idx(IDX_INIT(7))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[6] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  assert( is_not_dead(n4), "can not use dead node");
  assert( is_not_dead(n5), "can not use dead node");
  assert( is_not_dead(n6), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
  _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
  _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
  _in[6] = n6; if (n6 != NULL) n6->add_out((Node *)this);
}


//------------------------------clone------------------------------------------
// Clone a Node.  The clone is made by a raw byte copy of the subclass object
// (conjoint_words_to_lower), so it shares all scalar fields with the
// original; the in-edge array, out-edge state, unique index, node notes and
// (for MachNodes) the operand array are then fixed up individually.
Node *Node::clone() const {
  Compile *compile = Compile::current();
  uint s = size_of(); // Size of inherited Node
  // One arena allocation holds the node object followed by its _in array.
  Node *n = (Node*)compile->node_arena()->Amalloc_D(size_of() + _max*sizeof(Node*));
  Copy::conjoint_words_to_lower((HeapWord*)this, (HeapWord*)n, s);
  // Set the new input pointer array
  n->_in = (Node**)(((char*)n)+s);
  // Cannot share the old output pointer array, so kill it
  n->_out = NO_OUT_ARRAY;
  // And reset the counters to 0
  n->_outcnt = 0;
  n->_outmax = 0;
  // Unlock this guy, since he is not in any hash table.
  debug_only(n->_hash_lock = 0);
  // Walk the old node's input list to duplicate its edges; each copied input
  // gains an out-edge back to the clone.
  uint i;
  for( i = 0; i < len(); i++ ) {
    Node *x = in(i);
    n->_in[i] = x;
    if (x != NULL) x->add_out(n);
  }
  // Keep the compile-wide macro/expensive worklists consistent.
  if (is_macro())
    compile->add_macro_node(n);
  if (is_expensive())
    compile->add_expensive_node(n);

  n->set_idx(compile->next_unique()); // Get new unique index as well
  debug_only( n->verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Do not patch over the debug_idx of a clone, because it makes it
  // impossible to break on the clone's moment of creation.
  //debug_only( n->set_debug_idx( debug_idx() ) );

  compile->copy_node_notes_to(n, (Node*) this);

  // MachNode clone: the byte copy above left the clone's _opnds pointing
  // into the original, so recompute the pointer at the same relative offset
  // inside the clone and deep-copy each operand.
  uint nopnds;
  if (this->is_Mach() && (nopnds = this->as_Mach()->num_opnds()) > 0) {
    MachNode *mach  = n->as_Mach();
    MachNode *mthis = this->as_Mach();
    // Get address of _opnd_array.
    // It should be the same offset since it is the clone of this node.
    MachOper **from = mthis->_opnds;
    MachOper **to = (MachOper **)((size_t)(&mach->_opnds) +
                    pointer_delta((const void*)from,
                                  (const void*)(&mthis->_opnds), 1));
    mach->_opnds = to;
    for ( uint i = 0; i < nopnds; ++i ) {
      to[i] = from[i]->clone(compile);
    }
  }
  // cloning CallNode may need to clone JVMState
  if (n->is_Call()) {
    CallNode *call = n->as_Call();
    call->clone_jvms();
  }
  return n;  // Return the clone
}

//---------------------------setup_is_top--------------------------------------
// Call this when changing the top node, to reassert the invariants
// required by Node::is_top. See Compile::set_cached_top_node.
// (A NULL _out array is the distinguishing marker of the top node.)
void Node::setup_is_top() {
  if (this == (Node*)Compile::current()->top()) {
    // This node has just become top. Kill its out array.
    _outcnt = _outmax = 0;
    _out = NULL; // marker value for top
    assert(is_top(), "must be top");
  } else {
    if (_out == NULL) _out = NO_OUT_ARRAY;
    assert(!is_top(), "must not be top");
  }
}


//------------------------------~Node------------------------------------------
// Fancy destructor; eagerly attempt to reclaim Node numberings and storage
// ASSERT-build counters tracking how much arena storage was reclaimable.
extern int reclaim_idx;
extern int reclaim_in;
extern int reclaim_node;
// Eagerly tear down this node: drop all in-edges (and their reciprocal
// out-edges), give back its unique index if it was the most recent one,
// and return the edge arrays / object storage to the arena when they sit
// at the arena's high-water mark.
void Node::destruct() {
  // Eagerly reclaim unique Node numberings
  Compile* compile = Compile::current();
  if ((uint)_idx+1 == compile->unique()) {
    compile->set_unique(compile->unique()-1);
#ifdef ASSERT
    reclaim_idx++;
#endif
  }
  // Clear debug info:
  Node_Notes* nn = compile->node_notes_at(_idx);
  if (nn != NULL) nn->clear();
  // Walk the input array, freeing the corresponding output edges
  _cnt = _max;   // forget req/prec distinction
  uint i;
  for( i = 0; i < _max; i++ ) {
    set_req(i, NULL);
    //assert(def->out(def->outcnt()-1) == (Node *)this,"bad def-use hacking in reclaim");
  }
  assert(outcnt() == 0, "deleting a node must not leave a dangling use");
  // See if the input array was allocated just prior to the object
  int edge_size = _max*sizeof(void*);
  int out_edge_size = _outmax*sizeof(void*);
  char *edge_end = ((char*)_in) + edge_size;
  char *out_array = (char*)(_out == NO_OUT_ARRAY? NULL: _out);
  char *out_edge_end = out_array + out_edge_size;
  int node_size = size_of();

  // Free the output edge array
  if (out_edge_size > 0) {
#ifdef ASSERT
    if( out_edge_end == compile->node_arena()->hwm() )
      reclaim_in += out_edge_size;  // count reclaimed out edges with in edges
#endif
    compile->node_arena()->Afree(out_array, out_edge_size);
  }

  // Free the input edge array and the node itself
  if( edge_end == (char*)this ) {
    // NOTE(review): in ASSERT builds this branch only updates the reclaim
    // counters and does not Afree — the storage is kept so the smashed node
    // below stays addressable for debugging.
#ifdef ASSERT
    if( edge_end+node_size == compile->node_arena()->hwm() ) {
      reclaim_in += edge_size;
      reclaim_node+= node_size;
    }
#else
    // It was; free the input array and object all in one hit
    compile->node_arena()->Afree(_in,edge_size+node_size);
#endif
  } else {

    // Free just the input array
#ifdef ASSERT
    if( edge_end == compile->node_arena()->hwm() )
      reclaim_in += edge_size;
#endif
    compile->node_arena()->Afree(_in,edge_size);

    // Free just the object
#ifdef ASSERT
    if( ((char*)this) + node_size == compile->node_arena()->hwm() )
      reclaim_node+= node_size;
#else
    compile->node_arena()->Afree(this,node_size);
#endif
  }
  if (is_macro()) {
    compile->remove_macro_node(this);
  }
  if (is_expensive()) {
    compile->remove_expensive_node(this);
  }
#ifdef ASSERT
  // We will not actually delete the storage, but we'll make the node unusable.
  *(address*)this = badAddress;  // smash the C++ vtbl, probably
  _in = _out = (Node**) badAddress;
  _max = _cnt = _outmax = _outcnt = 0;
#endif
}

//------------------------------grow-------------------------------------------
// Grow the input array, making space for more edges.  Grows to the next
// power of two above 'len'; new slots are NULL-filled.
void Node::grow( uint len ) {
  Arena* arena = Compile::current()->node_arena();
  uint new_max = _max;
  if( new_max == 0 ) {
    // First growth: start with a 4-slot array.
    _max = 4;
    _in = (Node**)arena->Amalloc(4*sizeof(Node*));
    Node** to = _in;
    to[0] = NULL;
    to[1] = NULL;
    to[2] = NULL;
    to[3] = NULL;
    return;
  }
  while( new_max <= len ) new_max <<= 1; // Find next power-of-2
  // Trimming to limit allows a uint8 to handle up to 255 edges.
  // Previously I was using only powers-of-2 which peaked at 128 edges.
  //if( new_max >= limit ) new_max = limit-1;
  _in = (Node**)arena->Arealloc(_in, _max*sizeof(Node*), new_max*sizeof(Node*));
  Copy::zero_to_bytes(&_in[_max], (new_max-_max)*sizeof(Node*)); // NULL all new space
  _max = new_max;               // Record new max length
  // This assertion makes sure that Node::_max is wide enough to
  // represent the numerical value of new_max.
  assert(_max == new_max && _max > len, "int width of _max is too small");
}

//-----------------------------out_grow----------------------------------------
// Grow the output array, making space for more edges.  Unlike grow(), the
// new space is NOT zeroed — _outcnt bounds the valid entries.
void Node::out_grow( uint len ) {
  assert(!is_top(), "cannot grow a top node's out array");
  Arena* arena = Compile::current()->node_arena();
  uint new_max = _outmax;
  if( new_max == 0 ) {
    _outmax = 4;
    _out = (Node **)arena->Amalloc(4*sizeof(Node*));
    return;
  }
  while( new_max <= len ) new_max <<= 1; // Find next power-of-2
  // Trimming to limit allows a uint8 to handle up to 255 edges.
  // Previously I was using only powers-of-2 which peaked at 128 edges.
  //if( new_max >= limit ) new_max = limit-1;
  assert(_out != NULL && _out != NO_OUT_ARRAY, "out must have sensible value");
  _out = (Node**)arena->Arealloc(_out,_outmax*sizeof(Node*),new_max*sizeof(Node*));
  //Copy::zero_to_bytes(&_out[_outmax], (new_max-_outmax)*sizeof(Node*)); // NULL all new space
  _outmax = new_max;               // Record new max length
  // This assertion makes sure that Node::_max is wide enough to
  // represent the numerical value of new_max.
  assert(_outmax == new_max && _outmax > len, "int width of _outmax is too small");
}

#ifdef ASSERT
//------------------------------is_dead----------------------------------------
// A node is considered dead when all of its input slots are NULL.
// Dumps the node before reporting it dead, as a debugging aid.
bool Node::is_dead() const {
  // Mach and pinch point nodes may look like dead.
  if( is_top() || is_Mach() || (Opcode() == Op_Node && _outcnt > 0) )
    return false;
  for( uint i = 0; i < _max; i++ )
    if( _in[i] != NULL )
      return false;
  dump();
  return true;
}
#endif


//------------------------------is_unreachable---------------------------------
// True if this node has no uses, its type has collapsed to TOP, or its
// control input is top.
bool Node::is_unreachable(PhaseIterGVN &igvn) const {
  assert(!is_Mach(), "doesn't work with MachNodes");
  return outcnt() == 0 || igvn.type(this) == Type::TOP || in(0)->is_top();
}

//------------------------------add_req----------------------------------------
// Add a new required input at the end.  Precedence edges live past _cnt in
// the same array, so one of them may need to be slid out of the way first.
void Node::add_req( Node *n ) {
  assert( is_not_dead(n), "can not use dead node");

  // Look to see if I can move precedence down one without reallocating
  if( (_cnt >= _max) || (in(_max-1) != NULL) )
    grow( _max+1 );

  // Find a precedence edge to move
  if( in(_cnt) != NULL ) {       // Next precedence edge is busy?
    uint i;
    for( i=_cnt; i<_max; i++ )
      if( in(i) == NULL )        // Find the NULL at end of prec edge list
        break;                   // There must be one, since we grew the array
    _in[i] = in(_cnt);           // Move prec over, making space for req edge
  }
  _in[_cnt++] = n;               // Stuff over old prec edge
  if (n != NULL) n->add_out((Node *)this);
}

//---------------------------add_req_batch-------------------------------------
// Add the same required input 'n' m times at the end, sliding any precedence
// edges up by m positions in one block move.
void Node::add_req_batch( Node *n, uint m ) {
  assert( is_not_dead(n), "can not use dead node");
  // check various edge cases
  if ((int)m <= 1) {
    assert((int)m >= 0, "oob");
    if (m != 0) add_req(n);
    return;
  }

  // Look to see if I can move precedence down one without reallocating
  if( (_cnt+m) > _max || _in[_max-m] )
    grow( _max+m );

  // Find a precedence edge to move
  if( _in[_cnt] != NULL ) {      // Next precedence edge is busy?
    uint i;
    for( i=_cnt; i<_max; i++ )
      if( _in[i] == NULL )       // Find the NULL at end of prec edge list
        break;                   // There must be one, since we grew the array
    // Slide all the precs over by m positions (assume #prec << m).
    Copy::conjoint_words_to_higher((HeapWord*)&_in[_cnt], (HeapWord*)&_in[_cnt+m], ((i-_cnt)*sizeof(Node*)));
  }

  // Stuff over the old prec edges
  for(uint i=0; i<m; i++ ) {
    _in[_cnt++] = n;
  }

  // Insert multiple out edges on the node.
  // (top nodes keep no out array, so skip the reciprocal edges for top)
  if (n != NULL && !n->is_top()) {
    for(uint i=0; i<m; i++ ) {
      n->add_out((Node *)this);
    }
  }
}

//------------------------------del_req----------------------------------------
// Delete the required edge and compact the edge array.  Note: compaction is
// by swapping the LAST required edge into slot idx, so required-edge order
// is not preserved.
void Node::del_req( uint idx ) {
  assert( idx < _cnt, "oob");
  assert( !VerifyHashTableKeys || _hash_lock == 0,
          "remove node from hash table before modifying it");
  // First remove corresponding def-use edge
  Node *n = in(idx);
  if (n != NULL) n->del_out((Node *)this);
  _in[idx] = in(--_cnt);  // Compact the array
  _in[_cnt] = NULL;       // NULL out emptied slot
}

//------------------------------ins_req----------------------------------------
// Insert a new required input at position idx, shifting later required
// edges up by one (add_req(NULL) first makes the space).
void Node::ins_req( uint idx, Node *n ) {
  assert( is_not_dead(n), "can not use dead node");
  add_req(NULL);                // Make space
  assert( idx < _max, "Must have allocated enough space");
  // Slide over
  if(_cnt-idx-1 > 0) {
    Copy::conjoint_words_to_higher((HeapWord*)&_in[idx], (HeapWord*)&_in[idx+1], ((_cnt-idx-1)*sizeof(Node*)));
  }
  _in[idx] = n;                            // Stuff over old required edge
  if (n != NULL) n->add_out((Node *)this); // Add reciprocal def-use edge
}

//-----------------------------find_edge---------------------------------------
// Return the index of the first input (required or precedence) equal to n,
// or -1 if n is not an input.
int Node::find_edge(Node* n) {
  for (uint i = 0; i < len(); i++) {
    if (_in[i] == n) return i;
  }
  return -1;
}

//----------------------------replace_edge-------------------------------------
// Replace every occurrence of input 'old' with 'neww' (both required and
// precedence slots); returns the number of edges replaced.
int Node::replace_edge(Node* old, Node* neww) {
  if (old == neww) return 0;  // nothing to do
  uint nrep = 0;
  for (uint i = 0; i < len(); i++) {
    if (in(i) == old) {
      if (i < req())
        set_req(i, neww);
      else
        set_prec(i, neww);
      nrep++;
    }
  }
  return nrep;
}

//-------------------------disconnect_inputs-----------------------------------
// NULL out all inputs to eliminate incoming Def-Use edges.
// Return the number of edges between 'n' and 'this'
int Node::disconnect_inputs(Node *n, Compile* C) {
  int edges_to_n = 0;

  uint cnt = req();
  for( uint i = 0; i < cnt; ++i ) {
    if( in(i) == 0 ) continue;
    if( in(i) == n ) ++edges_to_n;
    set_req(i, NULL);
  }
  // Remove precedence edges if any exist
  // Note: Safepoints may have precedence edges, even during parsing
  if( (req() != len()) && (in(req()) != NULL) ) {
    uint max = len();
    for( uint i = 0; i < max; ++i ) {
      if( in(i) == 0 ) continue;
      if( in(i) == n ) ++edges_to_n;
      set_prec(i, NULL);
    }
  }

  // Node::destruct requires all out edges be deleted first
  // debug_only(destruct();)     // no reuse benefit expected
  // When no edge to 'n' was found, this node is fully disconnected and is
  // recorded as dead with the compile.
  if (edges_to_n == 0) {
    C->record_dead_node(_idx);
  }
  return edges_to_n;
}

//-----------------------------uncast---------------------------------------
// %%% Temporary, until we sort out CheckCastPP vs. CastPP.
// Strip away casting.  (It is depth-limited.)
Node* Node::uncast() const {
  // Should be inline:
  //return is_ConstraintCast() ? uncast_helper(this) : (Node*) this;
  if (is_ConstraintCast() || is_CheckCastPP())
    return uncast_helper(this);
  else
    return (Node*) this;
}

//---------------------------uncast_helper-------------------------------------
// Follow ConstraintCast / CheckCastPP chains through in(1), stopping at the
// first non-cast node (or NULL / wrong arity).  ASSERT builds bound the walk
// at K steps and dump the chain before failing.
Node* Node::uncast_helper(const Node* p) {
#ifdef ASSERT
  uint depth_count = 0;
  const Node* orig_p = p;
#endif

  while (true) {
#ifdef ASSERT
    if (depth_count >= K) {
      orig_p->dump(4);
      if (p != orig_p)
        p->dump(1);
    }
    assert(depth_count++ < K, "infinite loop in Node::uncast_helper");
#endif
    if (p == NULL || p->req() != 2) {
      break;
    } else if (p->is_ConstraintCast()) {
      p = p->in(1);
    } else if (p->is_CheckCastPP()) {
      p = p->in(1);
    } else {
      break;
    }
  }
  return (Node*) p;
}

//------------------------------add_prec---------------------------------------
// Add a new precedence input.  Precedence inputs are unordered, with
// duplicates removed and NULLs packed down at the end.
void Node::add_prec( Node *n ) {
  assert( is_not_dead(n), "can not use dead node");

  // Check for NULL at end
  if( _cnt >= _max || in(_max-1) )
    grow( _max+1 );

  // Find a precedence edge to move
  uint i = _cnt;
  while( in(i) != NULL ) i++;
  _in[i] = n;                                 // Stuff prec edge over NULL
  if ( n != NULL) n->add_out((Node *)this);   // Add mirror edge
}

//------------------------------rm_prec----------------------------------------
// Remove a precedence input.  Precedence inputs are unordered, with
// duplicates removed and NULLs packed down at the end.
907 void Node::rm_prec( uint j ) { 908 909 // Find end of precedence list to pack NULLs 910 uint i; 911 for( i=j; i<_max; i++ ) 912 if( !_in[i] ) // Find the NULL at end of prec edge list 913 break; 914 if (_in[j] != NULL) _in[j]->del_out((Node *)this); 915 _in[j] = _in[--i]; // Move last element over removed guy 916 _in[i] = NULL; // NULL out last element 917 } 918 919 //------------------------------size_of---------------------------------------- 920 uint Node::size_of() const { return sizeof(*this); } 921 922 //------------------------------ideal_reg-------------------------------------- 923 uint Node::ideal_reg() const { return 0; } 924 925 //------------------------------jvms------------------------------------------- 926 JVMState* Node::jvms() const { return NULL; } 927 928 #ifdef ASSERT 929 //------------------------------jvms------------------------------------------- 930 bool Node::verify_jvms(const JVMState* using_jvms) const { 931 for (JVMState* jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) { 932 if (jvms == using_jvms) return true; 933 } 934 return false; 935 } 936 937 //------------------------------init_NodeProperty------------------------------ 938 void Node::init_NodeProperty() { 939 assert(_max_classes <= max_jushort, "too many NodeProperty classes"); 940 assert(_max_flags <= max_jushort, "too many NodeProperty flags"); 941 } 942 #endif 943 944 //------------------------------format----------------------------------------- 945 // Print as assembly 946 void Node::format( PhaseRegAlloc *, outputStream *st ) const {} 947 //------------------------------emit------------------------------------------- 948 // Emit bytes starting at parameter 'ptr'. 949 void Node::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {} 950 //------------------------------lateExpand------------------------------------- 951 // Expand node after register allocation. 
952 void Node::lateExpand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {} 953 //------------------------------size------------------------------------------- 954 // Size of instruction in bytes 955 uint Node::size(PhaseRegAlloc *ra_) const { return 0; } 956 957 //------------------------------CFG Construction------------------------------- 958 // Nodes that end basic blocks, e.g. IfTrue/IfFalse, JumpProjNode, Root, 959 // Goto and Return. 960 const Node *Node::is_block_proj() const { return 0; } 961 962 // Minimum guaranteed type 963 const Type *Node::bottom_type() const { return Type::BOTTOM; } 964 965 966 //------------------------------raise_bottom_type------------------------------ 967 // Get the worst-case Type output for this Node. 968 void Node::raise_bottom_type(const Type* new_type) { 969 if (is_Type()) { 970 TypeNode *n = this->as_Type(); 971 if (VerifyAliases) { 972 assert(new_type->higher_equal(n->type()), "new type must refine old type"); 973 } 974 n->set_type(new_type); 975 } else if (is_Load()) { 976 LoadNode *n = this->as_Load(); 977 if (VerifyAliases) { 978 assert(new_type->higher_equal(n->type()), "new type must refine old type"); 979 } 980 n->set_type(new_type); 981 } 982 } 983 984 //------------------------------Identity--------------------------------------- 985 // Return a node that the given node is equivalent to. 986 Node *Node::Identity( PhaseTransform * ) { 987 return this; // Default to no identities 988 } 989 990 //------------------------------Value------------------------------------------ 991 // Compute a new Type for a node using the Type of the inputs. 992 const Type *Node::Value( PhaseTransform * ) const { 993 return bottom_type(); // Default to worst-case Type 994 } 995 996 //------------------------------Ideal------------------------------------------ 997 // 998 // 'Idealize' the graph rooted at this Node. 
//
// In order to be efficient and flexible there are some subtle invariants
// these Ideal calls need to hold.  Running with '+VerifyIterativeGVN' checks
// these invariants, although it is too slow to have on by default.  If you
// are hacking an Ideal call, be sure to test with +VerifyIterativeGVN!
//
// The Ideal call almost arbitrarily reshapes the graph rooted at the 'this'
// pointer.  If ANY change is made, it must return the root of the reshaped
// graph - even if the root is the same Node.  Example: swapping the inputs
// to an AddINode gives the same answer and same root, but you still have to
// return the 'this' pointer instead of NULL.
//
// You cannot return an OLD Node, except for the 'this' pointer.  Use the
// Identity call to return an old Node; basically if Identity can find
// another Node have the Ideal call make no change and return NULL.
// Example: AddINode::Ideal must check for add of zero; in this case it
// returns NULL instead of doing any graph reshaping.
//
// You cannot modify any old Nodes except for the 'this' pointer.  Due to
// sharing there may be other users of the old Nodes relying on their current
// semantics.  Modifying them will break the other users.
// Example: when reshaping "(X+3)+4" into "X+7" you must leave the Node for
// "X+3" unchanged in case it is shared.
//
// If you modify the 'this' pointer's inputs, you should use
// 'set_req'.  If you are making a new Node (either as the new root or
// some new internal piece) you may use 'init_req' to set the initial
// value.  You can make a new Node with either 'new' or 'clone'.  In
// either case, def-use info is correctly maintained.
1028 // 1029 // Example: reshape "(X+3)+4" into "X+7": 1030 // set_req(1, in(1)->in(1)); 1031 // set_req(2, phase->intcon(7)); 1032 // return this; 1033 // Example: reshape "X*4" into "X<<2" 1034 // return new (C) LShiftINode(in(1), phase->intcon(2)); 1035 // 1036 // You must call 'phase->transform(X)' on any new Nodes X you make, except 1037 // for the returned root node. Example: reshape "X*31" with "(X<<5)-X". 1038 // Node *shift=phase->transform(new(C)LShiftINode(in(1),phase->intcon(5))); 1039 // return new (C) AddINode(shift, in(1)); 1040 // 1041 // When making a Node for a constant use 'phase->makecon' or 'phase->intcon'. 1042 // These forms are faster than 'phase->transform(new (C) ConNode())' and Do 1043 // The Right Thing with def-use info. 1044 // 1045 // You cannot bury the 'this' Node inside of a graph reshape. If the reshaped 1046 // graph uses the 'this' Node it must be the root. If you want a Node with 1047 // the same Opcode as the 'this' pointer use 'clone'. 1048 // 1049 Node *Node::Ideal(PhaseGVN *phase, bool can_reshape) { 1050 return NULL; // Default to being Ideal already 1051 } 1052 1053 // Some nodes have specific Ideal subgraph transformations only if they are 1054 // unique users of specific nodes. Such nodes should be put on IGVN worklist 1055 // for the transformations to happen. 1056 bool Node::has_special_unique_user() const { 1057 assert(outcnt() == 1, "match only for unique out"); 1058 Node* n = unique_out(); 1059 int op = Opcode(); 1060 if( this->is_Store() ) { 1061 // Condition for back-to-back stores folding. 
1062 return n->Opcode() == op && n->in(MemNode::Memory) == this; 1063 } else if( op == Op_AddL ) { 1064 // Condition for convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y)) 1065 return n->Opcode() == Op_ConvL2I && n->in(1) == this; 1066 } else if( op == Op_SubI || op == Op_SubL ) { 1067 // Condition for subI(x,subI(y,z)) ==> subI(addI(x,z),y) 1068 return n->Opcode() == op && n->in(2) == this; 1069 } 1070 return false; 1071 }; 1072 1073 //--------------------------find_exact_control--------------------------------- 1074 // Skip Proj and CatchProj nodes chains. Check for Null and Top. 1075 Node* Node::find_exact_control(Node* ctrl) { 1076 if (ctrl == NULL && this->is_Region()) 1077 ctrl = this->as_Region()->is_copy(); 1078 1079 if (ctrl != NULL && ctrl->is_CatchProj()) { 1080 if (ctrl->as_CatchProj()->_con == CatchProjNode::fall_through_index) 1081 ctrl = ctrl->in(0); 1082 if (ctrl != NULL && !ctrl->is_top()) 1083 ctrl = ctrl->in(0); 1084 } 1085 1086 if (ctrl != NULL && ctrl->is_Proj()) 1087 ctrl = ctrl->in(0); 1088 1089 return ctrl; 1090 } 1091 1092 //--------------------------dominates------------------------------------------ 1093 // Helper function for MemNode::all_controls_dominate(). 1094 // Check if 'this' control node dominates or equal to 'sub' control node. 1095 // We already know that if any path back to Root or Start reaches 'this', 1096 // then all paths so, so this is a simple search for one example, 1097 // not an exhaustive search for a counterexample. 1098 bool Node::dominates(Node* sub, Node_List &nlist) { 1099 assert(this->is_CFG(), "expecting control"); 1100 assert(sub != NULL && sub->is_CFG(), "expecting control"); 1101 1102 // detect dead cycle without regions 1103 int iterations_without_region_limit = DominatorSearchLimit; 1104 1105 Node* orig_sub = sub; 1106 Node* dom = this; 1107 bool met_dom = false; 1108 nlist.clear(); 1109 1110 // Walk 'sub' backward up the chain to 'dom', watching for regions. 
1111 // After seeing 'dom', continue up to Root or Start. 1112 // If we hit a region (backward split point), it may be a loop head. 1113 // Keep going through one of the region's inputs. If we reach the 1114 // same region again, go through a different input. Eventually we 1115 // will either exit through the loop head, or give up. 1116 // (If we get confused, break out and return a conservative 'false'.) 1117 while (sub != NULL) { 1118 if (sub->is_top()) break; // Conservative answer for dead code. 1119 if (sub == dom) { 1120 if (nlist.size() == 0) { 1121 // No Region nodes except loops were visited before and the EntryControl 1122 // path was taken for loops: it did not walk in a cycle. 1123 return true; 1124 } else if (met_dom) { 1125 break; // already met before: walk in a cycle 1126 } else { 1127 // Region nodes were visited. Continue walk up to Start or Root 1128 // to make sure that it did not walk in a cycle. 1129 met_dom = true; // first time meet 1130 iterations_without_region_limit = DominatorSearchLimit; // Reset 1131 } 1132 } 1133 if (sub->is_Start() || sub->is_Root()) { 1134 // Success if we met 'dom' along a path to Start or Root. 1135 // We assume there are no alternative paths that avoid 'dom'. 1136 // (This assumption is up to the caller to ensure!) 1137 return met_dom; 1138 } 1139 Node* up = sub->in(0); 1140 // Normalize simple pass-through regions and projections: 1141 up = sub->find_exact_control(up); 1142 // If sub == up, we found a self-loop. Try to push past it. 1143 if (sub == up && sub->is_Loop()) { 1144 // Take loop entry path on the way up to 'dom'. 1145 up = sub->in(1); // in(LoopNode::EntryControl); 1146 } else if (sub == up && sub->is_Region() && sub->req() != 3) { 1147 // Always take in(1) path on the way up to 'dom' for clone regions 1148 // (with only one input) or regions which merge > 2 paths 1149 // (usually used to merge fast/slow paths). 
1150 up = sub->in(1); 1151 } else if (sub == up && sub->is_Region()) { 1152 // Try both paths for Regions with 2 input paths (it may be a loop head). 1153 // It could give conservative 'false' answer without information 1154 // which region's input is the entry path. 1155 iterations_without_region_limit = DominatorSearchLimit; // Reset 1156 1157 bool region_was_visited_before = false; 1158 // Was this Region node visited before? 1159 // If so, we have reached it because we accidentally took a 1160 // loop-back edge from 'sub' back into the body of the loop, 1161 // and worked our way up again to the loop header 'sub'. 1162 // So, take the first unexplored path on the way up to 'dom'. 1163 for (int j = nlist.size() - 1; j >= 0; j--) { 1164 intptr_t ni = (intptr_t)nlist.at(j); 1165 Node* visited = (Node*)(ni & ~1); 1166 bool visited_twice_already = ((ni & 1) != 0); 1167 if (visited == sub) { 1168 if (visited_twice_already) { 1169 // Visited 2 paths, but still stuck in loop body. Give up. 1170 return false; 1171 } 1172 // The Region node was visited before only once. 1173 // (We will repush with the low bit set, below.) 1174 nlist.remove(j); 1175 // We will find a new edge and re-insert. 1176 region_was_visited_before = true; 1177 break; 1178 } 1179 } 1180 1181 // Find an incoming edge which has not been seen yet; walk through it. 1182 assert(up == sub, ""); 1183 uint skip = region_was_visited_before ? 1 : 0; 1184 for (uint i = 1; i < sub->req(); i++) { 1185 Node* in = sub->in(i); 1186 if (in != NULL && !in->is_top() && in != sub) { 1187 if (skip == 0) { 1188 up = in; 1189 break; 1190 } 1191 --skip; // skip this nontrivial input 1192 } 1193 } 1194 1195 // Set 0 bit to indicate that both paths were taken. 1196 nlist.push((Node*)((intptr_t)sub + (region_was_visited_before ? 
1 : 0))); 1197 } 1198 1199 if (up == sub) { 1200 break; // some kind of tight cycle 1201 } 1202 if (up == orig_sub && met_dom) { 1203 // returned back after visiting 'dom' 1204 break; // some kind of cycle 1205 } 1206 if (--iterations_without_region_limit < 0) { 1207 break; // dead cycle 1208 } 1209 sub = up; 1210 } 1211 1212 // Did not meet Root or Start node in pred. chain. 1213 // Conservative answer for dead code. 1214 return false; 1215 } 1216 1217 //------------------------------remove_dead_region----------------------------- 1218 // This control node is dead. Follow the subgraph below it making everything 1219 // using it dead as well. This will happen normally via the usual IterGVN 1220 // worklist but this call is more efficient. Do not update use-def info 1221 // inside the dead region, just at the borders. 1222 static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) { 1223 // Con's are a popular node to re-hit in the hash table again. 1224 if( dead->is_Con() ) return; 1225 1226 // Can't put ResourceMark here since igvn->_worklist uses the same arena 1227 // for verify pass with +VerifyOpto and we add/remove elements in it here. 1228 Node_List nstack(Thread::current()->resource_area()); 1229 1230 Node *top = igvn->C->top(); 1231 nstack.push(dead); 1232 bool has_irreducible_loop = igvn->C->has_irreducible_loop(); 1233 1234 while (nstack.size() > 0) { 1235 dead = nstack.pop(); 1236 if (dead->outcnt() > 0) { 1237 // Keep dead node on stack until all uses are processed. 1238 nstack.push(dead); 1239 // For all Users of the Dead... ;-) 1240 for (DUIterator_Last kmin, k = dead->last_outs(kmin); k >= kmin; ) { 1241 Node* use = dead->last_out(k); 1242 igvn->hash_delete(use); // Yank from hash table prior to mod 1243 if (use->in(0) == dead) { // Found another dead node 1244 assert (!use->is_Con(), "Control for Con node should be Root node."); 1245 use->set_req(0, top); // Cut dead edge to prevent processing 1246 nstack.push(use); // the dead node again. 
1247 } else if (!has_irreducible_loop && // Backedge could be alive in irreducible loop 1248 use->is_Loop() && !use->is_Root() && // Don't kill Root (RootNode extends LoopNode) 1249 use->in(LoopNode::EntryControl) == dead) { // Dead loop if its entry is dead 1250 use->set_req(LoopNode::EntryControl, top); // Cut dead edge to prevent processing 1251 use->set_req(0, top); // Cut self edge 1252 nstack.push(use); 1253 } else { // Else found a not-dead user 1254 // Dead if all inputs are top or null 1255 bool dead_use = !use->is_Root(); // Keep empty graph alive 1256 for (uint j = 1; j < use->req(); j++) { 1257 Node* in = use->in(j); 1258 if (in == dead) { // Turn all dead inputs into TOP 1259 use->set_req(j, top); 1260 } else if (in != NULL && !in->is_top()) { 1261 dead_use = false; 1262 } 1263 } 1264 if (dead_use) { 1265 if (use->is_Region()) { 1266 use->set_req(0, top); // Cut self edge 1267 } 1268 nstack.push(use); 1269 } else { 1270 igvn->_worklist.push(use); 1271 } 1272 } 1273 // Refresh the iterator, since any number of kills might have happened. 1274 k = dead->last_outs(kmin); 1275 } 1276 } else { // (dead->outcnt() == 0) 1277 // Done with outputs. 1278 igvn->hash_delete(dead); 1279 igvn->_worklist.remove(dead); 1280 igvn->set_type(dead, Type::TOP); 1281 if (dead->is_macro()) { 1282 igvn->C->remove_macro_node(dead); 1283 } 1284 if (dead->is_expensive()) { 1285 igvn->C->remove_expensive_node(dead); 1286 } 1287 igvn->C->record_dead_node(dead->_idx); 1288 // Kill all inputs to the dead guy 1289 for (uint i=0; i < dead->req(); i++) { 1290 Node *n = dead->in(i); // Get input to dead guy 1291 if (n != NULL && !n->is_top()) { // Input is valid? 1292 dead->set_req(i, top); // Smash input away 1293 if (n->outcnt() == 0) { // Input also goes dead? 
1294 if (!n->is_Con()) 1295 nstack.push(n); // Clear it out as well 1296 } else if (n->outcnt() == 1 && 1297 n->has_special_unique_user()) { 1298 igvn->add_users_to_worklist( n ); 1299 } else if (n->outcnt() <= 2 && n->is_Store()) { 1300 // Push store's uses on worklist to enable folding optimization for 1301 // store/store and store/load to the same address. 1302 // The restriction (outcnt() <= 2) is the same as in set_req_X() 1303 // and remove_globally_dead_node(). 1304 igvn->add_users_to_worklist( n ); 1305 } 1306 } 1307 } 1308 } // (dead->outcnt() == 0) 1309 } // while (nstack.size() > 0) for outputs 1310 return; 1311 } 1312 1313 //------------------------------remove_dead_region----------------------------- 1314 bool Node::remove_dead_region(PhaseGVN *phase, bool can_reshape) { 1315 Node *n = in(0); 1316 if( !n ) return false; 1317 // Lost control into this guy? I.e., it became unreachable? 1318 // Aggressively kill all unreachable code. 1319 if (can_reshape && n->is_top()) { 1320 kill_dead_code(this, phase->is_IterGVN()); 1321 return false; // Node is dead. 1322 } 1323 1324 if( n->is_Region() && n->as_Region()->is_copy() ) { 1325 Node *m = n->nonnull_req(); 1326 set_req(0, m); 1327 return true; 1328 } 1329 return false; 1330 } 1331 1332 //------------------------------Ideal_DU_postCCP------------------------------- 1333 // Idealize graph, using DU info. Must clone result into new-space 1334 Node *Node::Ideal_DU_postCCP( PhaseCCP * ) { 1335 return NULL; // Default to no change 1336 } 1337 1338 //------------------------------hash------------------------------------------- 1339 // Hash function over Nodes. 
1340 uint Node::hash() const { 1341 uint sum = 0; 1342 for( uint i=0; i<_cnt; i++ ) // Add in all inputs 1343 sum = (sum<<1)-(uintptr_t)in(i); // Ignore embedded NULLs 1344 return (sum>>2) + _cnt + Opcode(); 1345 } 1346 1347 //------------------------------cmp-------------------------------------------- 1348 // Compare special parts of simple Nodes 1349 uint Node::cmp( const Node &n ) const { 1350 return 1; // Must be same 1351 } 1352 1353 //------------------------------rematerialize----------------------------------- 1354 // Should we clone rather than spill this instruction? 1355 bool Node::rematerialize() const { 1356 if ( is_Mach() ) 1357 return this->as_Mach()->rematerialize(); 1358 else 1359 return (_flags & Flag_rematerialize) != 0; 1360 } 1361 1362 //------------------------------needs_anti_dependence_check--------------------- 1363 // Nodes which use memory without consuming it, hence need antidependences. 1364 bool Node::needs_anti_dependence_check() const { 1365 if( req() < 2 || (_flags & Flag_needs_anti_dependence_check) == 0 ) 1366 return false; 1367 else 1368 return in(1)->bottom_type()->has_memory(); 1369 } 1370 1371 1372 // Get an integer constant from a ConNode (or CastIINode). 1373 // Return a default value if there is no apparent constant here. 1374 const TypeInt* Node::find_int_type() const { 1375 if (this->is_Type()) { 1376 return this->as_Type()->type()->isa_int(); 1377 } else if (this->is_Con()) { 1378 assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode"); 1379 return this->bottom_type()->isa_int(); 1380 } 1381 return NULL; 1382 } 1383 1384 // Get a pointer constant from a ConstNode. 1385 // Returns the constant if it is a pointer ConstNode 1386 intptr_t Node::get_ptr() const { 1387 assert( Opcode() == Op_ConP, "" ); 1388 return ((ConPNode*)this)->type()->is_ptr()->get_con(); 1389 } 1390 1391 // Get a narrow oop constant from a ConNNode. 
1392 intptr_t Node::get_narrowcon() const { 1393 assert( Opcode() == Op_ConN, "" ); 1394 return ((ConNNode*)this)->type()->is_narrowoop()->get_con(); 1395 } 1396 1397 // Get a long constant from a ConNode. 1398 // Return a default value if there is no apparent constant here. 1399 const TypeLong* Node::find_long_type() const { 1400 if (this->is_Type()) { 1401 return this->as_Type()->type()->isa_long(); 1402 } else if (this->is_Con()) { 1403 assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode"); 1404 return this->bottom_type()->isa_long(); 1405 } 1406 return NULL; 1407 } 1408 1409 1410 /** 1411 * Return a ptr type for nodes which should have it. 1412 */ 1413 const TypePtr* Node::get_ptr_type() const { 1414 const TypePtr* tp = this->bottom_type()->make_ptr(); 1415 #ifdef ASSERT 1416 if (tp == NULL) { 1417 this->dump(1); 1418 assert((tp != NULL), "unexpected node type"); 1419 } 1420 #endif 1421 return tp; 1422 } 1423 1424 // Get a double constant from a ConstNode. 1425 // Returns the constant if it is a double ConstNode 1426 jdouble Node::getd() const { 1427 assert( Opcode() == Op_ConD, "" ); 1428 return ((ConDNode*)this)->type()->is_double_constant()->getd(); 1429 } 1430 1431 // Get a float constant from a ConstNode. 1432 // Returns the constant if it is a float ConstNode 1433 jfloat Node::getf() const { 1434 assert( Opcode() == Op_ConF, "" ); 1435 return ((ConFNode*)this)->type()->is_float_constant()->getf(); 1436 } 1437 1438 #ifndef PRODUCT 1439 1440 //----------------------------NotANode---------------------------------------- 1441 // Used in debugging code to avoid walking across dead or uninitialized edges. 1442 static inline bool NotANode(const Node* n) { 1443 if (n == NULL) return true; 1444 if (((intptr_t)n & 1) != 0) return true; // uninitialized, etc. 
1445 if (*(address*)n == badAddress) return true; // kill by Node::destruct 1446 return false; 1447 } 1448 1449 1450 //------------------------------find------------------------------------------ 1451 // Find a neighbor of this Node with the given _idx 1452 // If idx is negative, find its absolute value, following both _in and _out. 1453 static void find_recur(Compile* C, Node* &result, Node *n, int idx, bool only_ctrl, 1454 VectorSet* old_space, VectorSet* new_space ) { 1455 int node_idx = (idx >= 0) ? idx : -idx; 1456 if (NotANode(n)) return; // Gracefully handle NULL, -1, 0xabababab, etc. 1457 // Contained in new_space or old_space? Check old_arena first since it's mostly empty. 1458 VectorSet *v = C->old_arena()->contains(n) ? old_space : new_space; 1459 if( v->test(n->_idx) ) return; 1460 if( (int)n->_idx == node_idx 1461 debug_only(|| n->debug_idx() == node_idx) ) { 1462 if (result != NULL) 1463 tty->print("find: " INTPTR_FORMAT " and " INTPTR_FORMAT " both have idx==%d\n", 1464 (uintptr_t)result, (uintptr_t)n, node_idx); 1465 result = n; 1466 } 1467 v->set(n->_idx); 1468 for( uint i=0; i<n->len(); i++ ) { 1469 if( only_ctrl && !(n->is_Region()) && (n->Opcode() != Op_Root) && (i != TypeFunc::Control) ) continue; 1470 find_recur(C, result, n->in(i), idx, only_ctrl, old_space, new_space ); 1471 } 1472 // Search along forward edges also: 1473 if (idx < 0 && !only_ctrl) { 1474 for( uint j=0; j<n->outcnt(); j++ ) { 1475 find_recur(C, result, n->raw_out(j), idx, only_ctrl, old_space, new_space ); 1476 } 1477 } 1478 #ifdef ASSERT 1479 // Search along debug_orig edges last, checking for cycles 1480 Node* orig = n->debug_orig(); 1481 if (orig != NULL) { 1482 do { 1483 if (NotANode(orig)) break; 1484 find_recur(C, result, orig, idx, only_ctrl, old_space, new_space ); 1485 orig = orig->debug_orig(); 1486 } while (orig != NULL && orig != n->debug_orig()); 1487 } 1488 #endif //ASSERT 1489 } 1490 1491 // call this from debugger: 1492 Node* find_node(Node* n, int idx) { 
1493 return n->find(idx); 1494 } 1495 1496 //------------------------------find------------------------------------------- 1497 Node* Node::find(int idx) const { 1498 ResourceArea *area = Thread::current()->resource_area(); 1499 VectorSet old_space(area), new_space(area); 1500 Node* result = NULL; 1501 find_recur(Compile::current(), result, (Node*) this, idx, false, &old_space, &new_space ); 1502 return result; 1503 } 1504 1505 //------------------------------find_ctrl-------------------------------------- 1506 // Find an ancestor to this node in the control history with given _idx 1507 Node* Node::find_ctrl(int idx) const { 1508 ResourceArea *area = Thread::current()->resource_area(); 1509 VectorSet old_space(area), new_space(area); 1510 Node* result = NULL; 1511 find_recur(Compile::current(), result, (Node*) this, idx, true, &old_space, &new_space ); 1512 return result; 1513 } 1514 #endif 1515 1516 1517 1518 #ifndef PRODUCT 1519 1520 // -----------------------------Name------------------------------------------- 1521 extern const char *NodeClassNames[]; 1522 const char *Node::Name() const { return NodeClassNames[Opcode()]; } 1523 1524 static bool is_disconnected(const Node* n) { 1525 for (uint i = 0; i < n->req(); i++) { 1526 if (n->in(i) != NULL) return false; 1527 } 1528 return true; 1529 } 1530 1531 #ifdef ASSERT 1532 static void dump_orig(Node* orig, outputStream *st) { 1533 Compile* C = Compile::current(); 1534 if (NotANode(orig)) orig = NULL; 1535 if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL; 1536 if (orig == NULL) return; 1537 st->print(" !orig="); 1538 Node* fast = orig->debug_orig(); // tortoise & hare algorithm to detect loops 1539 if (NotANode(fast)) fast = NULL; 1540 while (orig != NULL) { 1541 bool discon = is_disconnected(orig); // if discon, print [123] else 123 1542 if (discon) st->print("["); 1543 if (!Compile::current()->node_arena()->contains(orig)) 1544 st->print("o"); 1545 st->print("%d", orig->_idx); 1546 if (discon) 
st->print("]"); 1547 orig = orig->debug_orig(); 1548 if (NotANode(orig)) orig = NULL; 1549 if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL; 1550 if (orig != NULL) st->print(","); 1551 if (fast != NULL) { 1552 // Step fast twice for each single step of orig: 1553 fast = fast->debug_orig(); 1554 if (NotANode(fast)) fast = NULL; 1555 if (fast != NULL && fast != orig) { 1556 fast = fast->debug_orig(); 1557 if (NotANode(fast)) fast = NULL; 1558 } 1559 if (fast == orig) { 1560 st->print("..."); 1561 break; 1562 } 1563 } 1564 } 1565 } 1566 1567 void Node::set_debug_orig(Node* orig) { 1568 _debug_orig = orig; 1569 if (BreakAtNode == 0) return; 1570 if (NotANode(orig)) orig = NULL; 1571 int trip = 10; 1572 while (orig != NULL) { 1573 if (orig->debug_idx() == BreakAtNode || (int)orig->_idx == BreakAtNode) { 1574 tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d orig._idx=%d orig._debug_idx=%d", 1575 this->_idx, this->debug_idx(), orig->_idx, orig->debug_idx()); 1576 BREAKPOINT; 1577 } 1578 orig = orig->debug_orig(); 1579 if (NotANode(orig)) orig = NULL; 1580 if (trip-- <= 0) break; 1581 } 1582 } 1583 #endif //ASSERT 1584 1585 //------------------------------dump------------------------------------------ 1586 // Dump a Node 1587 void Node::dump(const char* suffix, outputStream *st) const { 1588 Compile* C = Compile::current(); 1589 bool is_new = C->node_arena()->contains(this); 1590 C->_in_dump_cnt++; 1591 st->print("%c%d\t%s\t=== ", is_new ? 
            ' ' : 'o', _idx, Name());

  // Dump the required and precedence inputs
  dump_req(st);
  dump_prec(st);
  // Dump the outputs
  dump_out(st);

  if (is_disconnected(this)) {
#ifdef ASSERT
    st->print(" [%d]",debug_idx());
    dump_orig(debug_orig(), st);
#endif
    st->cr();
    C->_in_dump_cnt--;
    return; // don't process dead nodes
  }

  // Dump node-specific info
  dump_spec(st);
#ifdef ASSERT
  // Dump the non-reset _debug_idx
  if (Verbose && WizardMode) {
    st->print(" [%d]",debug_idx());
  }
#endif

  const Type *t = bottom_type();

  if (t != NULL && (t->isa_instptr() || t->isa_klassptr())) {
    // Oop/klass-typed nodes: label with the most specific category we can.
    const TypeInstPtr *toop = t->isa_instptr();
    const TypeKlassPtr *tkls = t->isa_klassptr();
    ciKlass* klass = toop ? toop->klass() : (tkls ? tkls->klass() : NULL );
    if (klass && klass->is_loaded() && klass->is_interface()) {
      st->print(" Interface:");
    } else if (toop) {
      st->print(" Oop:");
    } else if (tkls) {
      st->print(" Klass:");
    }
    t->dump_on(st);
  } else if (t == Type::MEMORY) {
    st->print(" Memory:");
    MemNode::dump_adr_type(this, adr_type(), st);
  } else if (Verbose || WizardMode) {
    st->print(" Type:");
    if (t) {
      t->dump_on(st);
    } else {
      st->print("no type");
    }
  } else if (t->isa_vect() && this->is_MachSpillCopy()) {
    // NOTE(review): this branch is reached with t == NULL when neither
    // Verbose nor WizardMode is set (the earlier t != NULL test failed),
    // in which case t->isa_vect() would crash. Confirm bottom_type() can
    // never return NULL here, or hoist a NULL check above this chain.
    // Dump MachSpillcopy vector type.
    t->dump_on(st);
  }
  if (is_new) {
    // Freshly created node: show its debug origin and any attached notes.
    debug_only(dump_orig(debug_orig(), st));
    Node_Notes* nn = C->node_notes_at(_idx);
    if (nn != NULL && !nn->is_clear()) {
      if (nn->jvms() != NULL) {
        st->print(" !jvms:");
        nn->jvms()->dump_spec(st);
      }
    }
  }
  if (suffix) st->print(suffix);
  C->_in_dump_cnt--;
}

//------------------------------dump_req--------------------------------------
// Print this node's required (def-use) inputs to 'st'. Each input is shown
// as its _idx, prefixed with 'o' when the node lives outside the current
// compile's node arena (i.e. an "old" node from a prior phase).
void Node::dump_req(outputStream *st) const {
  // Dump the required input edges
  for (uint i = 0; i < req(); i++) { // For all required inputs
    Node* d = in(i);
    if (d == NULL) {
      st->print("_ ");
    } else if (NotANode(d)) {
      st->print("NotANode "); // uninitialized, sentinel, garbage, etc.
    } else {
      st->print("%c%d ", Compile::current()->node_arena()->contains(d) ? ' ' : 'o', d->_idx);
    }
  }
}


//------------------------------dump_prec-------------------------------------
// Print this node's precedence inputs (slots beyond req()) to 'st'.
// A leading " |" separates them from the required inputs; it is printed
// only if at least one non-NULL precedence edge exists.
void Node::dump_prec(outputStream *st) const {
  // Dump the precedence edges
  int any_prec = 0;
  for (uint i = req(); i < len(); i++) { // For all precedence inputs
    Node* p = in(i);
    if (p != NULL) {
      if (!any_prec++) st->print(" |");
      if (NotANode(p)) { st->print("NotANode "); continue; }
      st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    }
  }
}

//------------------------------dump_out--------------------------------------
// Print this node's output (use) edges to 'st', bracketed by "[[" and "]]",
// using the same 'o'-prefix convention as dump_req for old-arena nodes.
void Node::dump_out(outputStream *st) const {
  // Delimit the output edges
  st->print(" [[");
  // Dump the output edges
  for (uint i = 0; i < _outcnt; i++) { // For all outputs
    Node* u = _out[i];
    if (u == NULL) {
      st->print("_ ");
    } else if (NotANode(u)) {
      st->print("NotANode ");
    } else {
      st->print("%c%d ", Compile::current()->node_arena()->contains(u) ? ' ' : 'o', u->_idx);
    }
  }
  st->print("]] ");
}

//------------------------------dump_nodes-------------------------------------
// Breadth-first collect and dump the neighborhood of 'start' to depth |d|.
// The sign of 'd' selects the direction: d > 0 walks input (def) edges,
// d <= 0 walks output (use) edges. Root and top are never crossed, and
// with 'only_ctrl' set, only CFG nodes are followed. For an input walk the
// collected nodes are printed deepest-first so defs appear before uses.
static void dump_nodes(const Node* start, int d, bool only_ctrl) {
  Node* s = (Node*)start; // remove const
  if (NotANode(s)) return;

  uint depth = (uint)ABS(d);
  int direction = d;
  Compile* C = Compile::current();
  // Worklist doubles as the visited set (nstack.contains below).
  GrowableArray <Node *> nstack(C->unique());

  nstack.append(s);
  int begin = 0;
  int end = 0;
  // One pass per level: [begin, end) is the frontier of the previous level.
  for(uint i = 0; i < depth; i++) {
    end = nstack.length();
    for(int j = begin; j < end; j++) {
      Node* tp = nstack.at(j);
      uint limit = direction > 0 ? tp->len() : tp->outcnt();
      for(uint k = 0; k < limit; k++) {
        Node* n = direction > 0 ? tp->in(k) : tp->raw_out(k);

        if (NotANode(n)) continue;
        // do not recurse through top or the root (would reach unrelated stuff)
        if (n->is_Root() || n->is_top()) continue;
        if (only_ctrl && !n->is_CFG()) continue;

        bool on_stack = nstack.contains(n);
        if (!on_stack) {
          nstack.append(n);
        }
      }
    }
    begin = end;
  }
  end = nstack.length();
  if (direction > 0) {
    // Input walk: print farthest (deepest defs) first.
    for(int j = end-1; j >= 0; j--) {
      nstack.at(j)->dump();
    }
  } else {
    // Output walk: print in discovery order.
    for(int j = 0; j < end; j++) {
      nstack.at(j)->dump();
    }
  }
}

//------------------------------dump-------------------------------------------
// Dump this node and its inputs (d > 0) or outputs (d <= 0) to depth |d|.
void Node::dump(int d) const {
  dump_nodes(this, d, false);
}

//------------------------------dump_ctrl--------------------------------------
// Dump a Node's control history to depth
void Node::dump_ctrl(int d) const {
  dump_nodes(this, d, true);
}

// VERIFICATION CODE
// For each input edge to a node (ie - for each Use-Def edge), verify that
// there is a corresponding Def-Use edge.
//------------------------------verify_edges-----------------------------------
// Verify def-use symmetry over the subgraph reachable through this node's
// inputs: every input edge to 'n' must be matched by exactly as many output
// edges from 'n' back to this node. 'visited' terminates the recursion.
void Node::verify_edges(Unique_Node_List &visited) {
  uint i, j, idx;
  int cnt;
  Node *n;

  // Recursive termination test
  if (visited.member(this)) return;
  visited.push(this);

  // Walk over all input edges, checking for correspondence
  for( i = 0; i < len(); i++ ) {
    n = in(i);
    if (n != NULL && !n->is_top()) {
      // Count instances of (Node *)this
      cnt = 0;
      for (idx = 0; idx < n->_outcnt; idx++ ) {
        if (n->_out[idx] == (Node *)this) cnt++;
      }
      assert( cnt > 0,"Failed to find Def-Use edge." );
      // Check for duplicate edges
      // walk the input array downcounting the input edges to n
      for( j = 0; j < len(); j++ ) {
        if( in(j) == n ) cnt--;
      }
      // Each duplicate input edge must have its own matching output edge.
      assert( cnt == 0,"Mismatched edge count.");
    } else if (n == NULL) {
      assert(i >= req() || i == 0 || is_Region() || is_Phi(), "only regions or phis have null data edges");
    } else {
      assert(n->is_top(), "sanity");
      // Nothing to check.
    }
  }
  // Recursive walk over all input edges
  for( i = 0; i < len(); i++ ) {
    n = in(i);
    if( n != NULL )
      in(i)->verify_edges(visited);
  }
}

//------------------------------verify_recur-----------------------------------
static const Node *unique_top = NULL;

// Recursive worker for Node::verify(). Checks def-use edge-count symmetry
// for every input of 'n' and recurses to 'verify_depth' levels; a negative
// depth means unlimited. Also checks that the TOP constant node is unique.
// Nodes are tracked in two VectorSets because node indices are only unique
// within one arena (old space vs. the current compile's new space).
void Node::verify_recur(const Node *n, int verify_depth,
                        VectorSet &old_space, VectorSet &new_space) {
  if ( verify_depth == 0 ) return;
  if (verify_depth > 0) --verify_depth;

  Compile* C = Compile::current();

  // Contained in new_space or old_space?
  VectorSet *v = C->node_arena()->contains(n) ? &new_space : &old_space;
  // Check for visited in the proper space. Numberings are not unique
  // across spaces so we need a separate VectorSet for each space.
  if( v->test_set(n->_idx) ) return;

  if (n->is_Con() && n->bottom_type() == Type::TOP) {
    // Record the first TOP seen, then require all others to be that node.
    if (C->cached_top_node() == NULL)
      C->set_cached_top_node((Node*)n);
    assert(C->cached_top_node() == n, "TOP node must be unique");
  }

  for( uint i = 0; i < n->len(); i++ ) {
    Node *x = n->in(i);
    if (!x || x->is_top()) continue;

    // Verify my input has a def-use edge to me
    if (true /*VerifyDefUse*/) {
      // Count use-def edges from n to x
      int cnt = 0;
      for( uint j = 0; j < n->len(); j++ )
        if( n->in(j) == x )
          cnt++;
      // Count def-use edges from x to n
      uint max = x->_outcnt;
      for( uint k = 0; k < max; k++ )
        if (x->_out[k] == n)
          cnt--;
      assert( cnt == 0, "mismatched def-use edge counts" );
    }

    verify_recur(x, verify_depth, old_space, new_space);
  }

}

//------------------------------verify-----------------------------------------
// Check Def-Use info for my subgraph
void Node::verify() const {
  Compile* C = Compile::current();
  // Save and restore the cached TOP node: verify_recur may re-cache it.
  Node* old_top = C->cached_top_node();
  ResourceMark rm;
  ResourceArea *area = Thread::current()->resource_area();
  VectorSet old_space(area), new_space(area);
  verify_recur(this, -1, old_space, new_space); // -1 = unlimited depth
  C->set_cached_top_node(old_top);
}
#endif


//------------------------------walk-------------------------------------------
// Graph walk, with both pre-order and post-order functions
void Node::walk(NFunc pre, NFunc post, void *env) {
  VectorSet visited(Thread::current()->resource_area()); // Setup for local walk
  walk_(pre, post, env, visited);
}

// Recursive worker for walk(): depth-first over input edges, calling 'pre'
// before and 'post' after visiting a node's inputs. Each node is visited once.
void Node::walk_(NFunc pre, NFunc post, void *env, VectorSet &visited) {
  if( visited.test_set(_idx) ) return;
  pre(*this,env); // Call the pre-order walk function
  for( uint i=0; i<_max; i++ )
    if( in(i) ) // Input exists and is not walked?
      in(i)->walk_(pre,post,env,visited); // Walk it with pre & post functions
  post(*this,env); // Call the post-order walk function
}

// No-op walk function, usable as 'pre' or 'post' in Node::walk.
void Node::nop(Node &, void*) {}

//------------------------------Registers--------------------------------------
// Do we Match on this edge index or not? Generally false for Control
// and true for everything else. Weird for calls & returns.
uint Node::match_edge(uint idx) const {
  return idx; // True for other than index 0 (control)
}

// Register classes are defined for specific machines
const RegMask &Node::out_RegMask() const {
  ShouldNotCallThis();
  // Unreachable if ShouldNotCallThis() aborts (presumably it does); the
  // allocation below only exists to satisfy the reference return type.
  return *(new RegMask());
}

const RegMask &Node::in_RegMask(uint) const {
  ShouldNotCallThis();
  // Same as out_RegMask: dead code that satisfies the return type.
  return *(new RegMask());
}

//=============================================================================
//-----------------------------------------------------------------------------
// Release this array's storage back to its arena and start over, empty,
// in 'new_arena'. Storage is lazily re-acquired by grow().
void Node_Array::reset( Arena *new_arena ) {
  _a->Afree(_nodes,_max*sizeof(Node*));
  _max = 0;
  _nodes = NULL;
  _a = new_arena;
}

//------------------------------clear------------------------------------------
// Clear all entries in _nodes to NULL but keep storage
void Node_Array::clear() {
  Copy::zero_to_bytes( _nodes, _max*sizeof(Node*) );
}

//-----------------------------------------------------------------------------
// Grow the backing store (doubling) until index 'i' is in range; the new
// tail is zero-filled so unused slots read as NULL.
void Node_Array::grow( uint i ) {
  if( !_max ) {
    // First allocation: start at capacity 1 so the doubling loop works.
    _max = 1;
    _nodes = (Node**)_a->Amalloc( _max * sizeof(Node*) );
    _nodes[0] = NULL;
  }
  uint old = _max;
  while( i >= _max ) _max <<= 1; // Double to fit
  _nodes = (Node**)_a->Arealloc( _nodes, old*sizeof(Node*),_max*sizeof(Node*));
  Copy::zero_to_bytes( &_nodes[old], (_max-old)*sizeof(Node*) );
}

//-----------------------------------------------------------------------------
// Insert 'n' at slot 'i', shifting later entries up by one. Grows first
// when the last slot is occupied so the shift has room; the shifted-off
// last entry is otherwise assumed NULL (and lost if it is not).
void Node_Array::insert( uint i, Node *n ) {
  if( _nodes[_max-1] ) grow(_max); // Get more space if full
  Copy::conjoint_words_to_higher((HeapWord*)&_nodes[i], (HeapWord*)&_nodes[i+1], ((_max-i-1)*sizeof(Node*)));
  _nodes[i] = n;
}

//-----------------------------------------------------------------------------
// Remove slot 'i', shifting later entries down by one; the vacated last
// slot becomes NULL.
void Node_Array::remove( uint i ) {
  Copy::conjoint_words_to_lower((HeapWord*)&_nodes[i+1], (HeapWord*)&_nodes[i], ((_max-i-1)*sizeof(Node*)));
  _nodes[_max-1] = NULL;
}

//-----------------------------------------------------------------------------
// Sort the whole backing array (including NULL slots) with qsort and the
// caller-supplied C comparison function.
void Node_Array::sort( C_sort_func_t func) {
  qsort( _nodes, _max, sizeof( Node* ), func );
}

//-----------------------------------------------------------------------------
// Debug print of all non-NULL entries, indexed.
void Node_Array::dump() const {
#ifndef PRODUCT
  for( uint i = 0; i < _max; i++ ) {
    Node *nn = _nodes[i];
    if( nn != NULL ) {
      tty->print("%5d--> ",i); nn->dump();
    }
  }
#endif
}

//--------------------------is_iteratively_computed------------------------------
// Operation appears to be iteratively computed (such as an induction variable)
// It is possible for this operation to return false for a loop-varying
// value, if it appears (by local graph inspection) to be computed by a simple conditional.
bool Node::is_iteratively_computed() {
  if (ideal_reg()) { // does operation have a result register?
    // True iff some data input is a Phi that in turn has this node as an
    // input, i.e. this node feeds back into one of its own Phi inputs.
    for (uint i = 1; i < req(); i++) {
      Node* n = in(i);
      if (n != NULL && n->is_Phi()) {
        for (uint j = 1; j < n->req(); j++) {
          if (n->in(j) == this) {
            return true;
          }
        }
      }
    }
  }
  return false;
}

//--------------------------find_similar------------------------------
// Return a node with opcode "opc" and same inputs as "this" if one can
// be found; Otherwise return NULL;
Node* Node::find_similar(int opc) {
  if (req() >= 2) {
    // Scan the users of our first real input: any node with identical
    // inputs must appear among them, so this bounds the search cheaply.
    Node* def = in(1);
    if (def && def->outcnt() >= 2) {
      for (DUIterator_Fast dmax, i = def->fast_outs(dmax); i < dmax; i++) {
        Node* use = def->fast_out(i);
        if (use->Opcode() == opc &&
            use->req() == req()) {
          uint j;
          for (j = 0; j < use->req(); j++) {
            if (use->in(j) != in(j)) {
              break;
            }
          }
          if (j == use->req()) {
            return use;
          }
        }
      }
    }
  }
  return NULL;
}


//--------------------------unique_ctrl_out------------------------------
// Return the unique control out if only one. Null if none or more than one.
2013 Node* Node::unique_ctrl_out() { 2014 Node* found = NULL; 2015 for (uint i = 0; i < outcnt(); i++) { 2016 Node* use = raw_out(i); 2017 if (use->is_CFG() && use != this) { 2018 if (found != NULL) return NULL; 2019 found = use; 2020 } 2021 } 2022 return found; 2023 } 2024 2025 //============================================================================= 2026 //------------------------------yank------------------------------------------- 2027 // Find and remove 2028 void Node_List::yank( Node *n ) { 2029 uint i; 2030 for( i = 0; i < _cnt; i++ ) 2031 if( _nodes[i] == n ) 2032 break; 2033 2034 if( i < _cnt ) 2035 _nodes[i] = _nodes[--_cnt]; 2036 } 2037 2038 //------------------------------dump------------------------------------------- 2039 void Node_List::dump() const { 2040 #ifndef PRODUCT 2041 for( uint i = 0; i < _cnt; i++ ) 2042 if( _nodes[i] ) { 2043 tty->print("%5d--> ",i); 2044 _nodes[i]->dump(); 2045 } 2046 #endif 2047 } 2048 2049 //============================================================================= 2050 //------------------------------remove----------------------------------------- 2051 void Unique_Node_List::remove( Node *n ) { 2052 if( _in_worklist[n->_idx] ) { 2053 for( uint i = 0; i < size(); i++ ) 2054 if( _nodes[i] == n ) { 2055 map(i,Node_List::pop()); 2056 _in_worklist >>= n->_idx; 2057 return; 2058 } 2059 ShouldNotReachHere(); 2060 } 2061 } 2062 2063 //-----------------------remove_useless_nodes---------------------------------- 2064 // Remove useless nodes from worklist 2065 void Unique_Node_List::remove_useless_nodes(VectorSet &useful) { 2066 2067 for( uint i = 0; i < size(); ++i ) { 2068 Node *n = at(i); 2069 assert( n != NULL, "Did not expect null entries in worklist"); 2070 if( ! 
useful.test(n->_idx) ) { 2071 _in_worklist >>= n->_idx; 2072 map(i,Node_List::pop()); 2073 // Node *replacement = Node_List::pop(); 2074 // if( i != size() ) { // Check if removing last entry 2075 // _nodes[i] = replacement; 2076 // } 2077 --i; // Visit popped node 2078 // If it was last entry, loop terminates since size() was also reduced 2079 } 2080 } 2081 } 2082 2083 //============================================================================= 2084 void Node_Stack::grow() { 2085 size_t old_top = pointer_delta(_inode_top,_inodes,sizeof(INode)); // save _top 2086 size_t old_max = pointer_delta(_inode_max,_inodes,sizeof(INode)); 2087 size_t max = old_max << 1; // max * 2 2088 _inodes = REALLOC_ARENA_ARRAY(_a, INode, _inodes, old_max, max); 2089 _inode_max = _inodes + max; 2090 _inode_top = _inodes + old_top; // restore _top 2091 } 2092 2093 // Node_Stack is used to map nodes. 2094 Node* Node_Stack::find(uint idx) const { 2095 uint sz = size(); 2096 for (uint i=0; i < sz; i++) { 2097 if (idx == index_at(i) ) 2098 return node_at(i); 2099 } 2100 return NULL; 2101 } 2102 2103 //============================================================================= 2104 uint TypeNode::size_of() const { return sizeof(*this); } 2105 #ifndef PRODUCT 2106 void TypeNode::dump_spec(outputStream *st) const { 2107 if( !Verbose && !WizardMode ) { 2108 // standard dump does this in Verbose and WizardMode 2109 st->print(" #"); _type->dump_on(st); 2110 } 2111 } 2112 #endif 2113 uint TypeNode::hash() const { 2114 return Node::hash() + _type->hash(); 2115 } 2116 uint TypeNode::cmp( const Node &n ) const 2117 { return !Type::cmp( _type, ((TypeNode&)n)._type ); } 2118 const Type *TypeNode::bottom_type() const { return _type; } 2119 const Type *TypeNode::Value( PhaseTransform * ) const { return _type; } 2120 2121 //------------------------------ideal_reg-------------------------------------- 2122 uint TypeNode::ideal_reg() const { 2123 return Matcher::base2reg[_type->base()]; 2124 }