/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/regmask.hpp"
#include "opto/type.hpp"
#include "utilities/copy.hpp"

class RegMask;
// #include "phase.hpp"
class PhaseTransform;
class PhaseGVN;

// Sentinel register number meaning "no machine register assigned".
// NOTE(review): the historical comment here read "Arena we are currently
// building Nodes in", which does not describe this constant — presumably
// left over from an earlier layout of this file.
const uint Node::NotAMachineReg = 0xffff0000;

#ifndef PRODUCT
extern int nodes_created;   // debug-build statistic: total Nodes ever constructed
#endif

#ifdef ASSERT

//-------------------------- construct_node------------------------------------
// Set a breakpoint here to identify where a particular node index is built.
// Debug-only bookkeeping run for every newly constructed Node.  Assigns a
// _debug_idx whose low five decimal digits mirror _idx (so both are visible
// from one number), honors the BreakAtNode flag, and initializes the
// DU-iterator assertion fields.
void Node::verify_construction() {
  _debug_orig = NULL;
  int old_debug_idx = Compile::debug_idx();
  int new_debug_idx = old_debug_idx+1;
  if (new_debug_idx > 0) {
    // Arrange that the lowest five decimal digits of _debug_idx
    // will repeat those of _idx. In case this is somehow pathological,
    // we continue to assign negative numbers (!) consecutively.
    const int mod = 100000;
    int bump = (int)(_idx - new_debug_idx) % mod;
    if (bump < 0) bump += mod;
    assert(bump >= 0 && bump < mod, "");
    new_debug_idx += bump;
  }
  Compile::set_debug_idx(new_debug_idx);
  set_debug_idx( new_debug_idx );
  assert(Compile::current()->unique() < (uint)MaxNodeLimit, "Node limit exceeded");
  // Allow a debugger breakpoint keyed on either the stable _debug_idx or the
  // per-compile _idx.
  if (BreakAtNode != 0 && (_debug_idx == BreakAtNode || (int)_idx == BreakAtNode)) {
    tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d", _idx, _debug_idx);
    BREAKPOINT;
  }
#if OPTO_DU_ITERATOR_ASSERT
  _last_del = NULL;
  _del_tick = 0;
#endif
  _hash_lock = 0;
}


// #ifdef ASSERT ...

#if OPTO_DU_ITERATOR_ASSERT
// Capture a snapshot of the node's out-edge state (_outcnt, _del_tick) so a
// later verify() can detect unexpected insertions/deletions during iteration.
void DUIterator_Common::sample(const Node* node) {
  _vdui     = VerifyDUIterators;
  _node     = node;
  _outcnt   = node->_outcnt;
  _del_tick = node->_del_tick;
  _last     = NULL;
}

// Check the iterator is still attached to the same node and that no
// deletions have happened since the last sample/resync.
void DUIterator_Common::verify(const Node* node, bool at_end_ok) {
  assert(_node     == node, "consistent iterator source");
  assert(_del_tick == node->_del_tick, "no unexpected deletions allowed");
}

void DUIterator_Common::verify_resync() {
  // Ensure that the loop body has just deleted the last guy produced.
  const Node* node = _node;
  // Ensure that at least one copy of the last-seen edge was deleted.
  // Note:  It is OK to delete multiple copies of the last-seen edge.
  // Unfortunately, we have no way to verify that all the deletions delete
  // that same edge.  On this point we must use the Honor System.
  assert(node->_del_tick >= _del_tick+1, "must have deleted an edge");
  assert(node->_last_del == _last, "must have deleted the edge just produced");
  // We liked this deletion, so accept the resulting outcnt and tick.
  _outcnt   = node->_outcnt;
  _del_tick = node->_del_tick;
}

// Re-seat this iterator from another one (iterator assignment).
void DUIterator_Common::reset(const DUIterator_Common& that) {
  if (this == &that)  return;  // ignore assignment to self
  if (!_vdui) {
    // We need to initialize everything, overwriting garbage values.
    _last = that._last;
    _vdui = that._vdui;
  }
  // Note:  It is legal (though odd) for an iterator over some node x
  // to be reassigned to iterate over another node y.  Some doubly-nested
  // progress loops depend on being able to do this.
  const Node* node = that._node;
  // Re-initialize everything, except _last.
  _node     = node;
  _outcnt   = node->_outcnt;
  _del_tick = node->_del_tick;
}

void DUIterator::sample(const Node* node) {
  DUIterator_Common::sample(node);      // Initialize the assertion data.
  _refresh_tick = 0;                    // No refreshes have happened, as yet.
}

// An index-based iterator position is valid while _idx < _outcnt; at_end_ok
// additionally permits the one-past-the-end position.
void DUIterator::verify(const Node* node, bool at_end_ok) {
  DUIterator_Common::verify(node, at_end_ok);
  assert(_idx < node->_outcnt + (uint)at_end_ok, "idx in range");
}

void DUIterator::verify_increment() {
  if (_refresh_tick & 1) {
    // We have refreshed the index during this loop.
    // Fix up _idx to meet asserts.
    if (_idx > _outcnt)  _idx = _outcnt;
  }
  verify(_node, true);
}

void DUIterator::verify_resync() {
  // Note:  We do not assert on _outcnt, because insertions are OK here.
  DUIterator_Common::verify_resync();
  // Make sure we are still in sync, possibly with no more out-edges:
  verify(_node, true);
}

void DUIterator::reset(const DUIterator& that) {
  if (this == &that)  return;  // self assignment is always a no-op
  assert(that._refresh_tick == 0, "assign only the result of Node::outs()");
  assert(that._idx          == 0, "assign only the result of Node::outs()");
  assert(_idx == that._idx, "already assigned _idx");
  if (!_vdui) {
    // We need to initialize everything, overwriting garbage values.
    sample(that._node);
  } else {
    DUIterator_Common::reset(that);
    if (_refresh_tick & 1) {
      _refresh_tick++;                  // Clear the "was refreshed" flag.
    }
    assert(_refresh_tick < 2*100000, "DU iteration must converge quickly");
  }
}

void DUIterator::refresh() {
  DUIterator_Common::sample(_node);     // Re-fetch assertion data.
  _refresh_tick |= 1;                   // Set the "was refreshed" flag.
}

void DUIterator::verify_finish() {
  // If the loop has killed the node, do not require it to re-run.
  if (_node->_outcnt == 0)  _refresh_tick &= ~1;
  // If this assert triggers, it means that a loop used refresh_out_pos
  // to re-synch an iteration index, but the loop did not correctly
  // re-run itself, using a "while (progress)" construct.
  // This iterator enforces the rule that you must keep trying the loop
  // until it "runs clean" without any need for refreshing.
  assert(!(_refresh_tick & 1), "the loop must run once with no refreshing");
}


// Fast iterators use a raw pointer into the out array; neither insertions
// nor unnoticed deletions are allowed while one is live.
void DUIterator_Fast::verify(const Node* node, bool at_end_ok) {
  DUIterator_Common::verify(node, at_end_ok);
  Node** out    = node->_out;
  uint   cnt    = node->_outcnt;
  assert(cnt == _outcnt, "no insertions allowed");
  assert(_outp >= out && _outp <= out + cnt - !at_end_ok, "outp in range");
  // This last check is carefully designed to work for NO_OUT_ARRAY.
}

void DUIterator_Fast::verify_limit() {
  const Node* node = _node;
  verify(node, true);
  assert(_outp == node->_out + node->_outcnt, "limit still correct");
}

void DUIterator_Fast::verify_resync() {
  const Node* node = _node;
  if (_outp == node->_out + _outcnt) {
    // Note that the limit imax, not the pointer i, gets updated with the
    // exact count of deletions.  (For the pointer it's always "--i".)
    assert(node->_outcnt+node->_del_tick == _outcnt+_del_tick, "no insertions allowed with deletion(s)");
    // This is a limit pointer, with a name like "imax".
    // Fudge the _last field so that the common assert will be happy.
    _last = (Node*) node->_last_del;
    DUIterator_Common::verify_resync();
  } else {
    assert(node->_outcnt < _outcnt, "no insertions allowed with deletion(s)");
    // A normal internal pointer.
    DUIterator_Common::verify_resync();
    // Make sure we are still in sync, possibly with no more out-edges:
    verify(node, true);
  }
}

// Called when the loop applies "imax -= n" after deleting n edges at once.
void DUIterator_Fast::verify_relimit(uint n) {
  const Node* node = _node;
  assert((int)n > 0, "use imax -= n only with a positive count");
  // This must be a limit pointer, with a name like "imax".
  assert(_outp == node->_out + node->_outcnt, "apply -= only to a limit (imax)");
  // The reported number of deletions must match what the node saw.
  assert(node->_del_tick == _del_tick + n, "must have deleted n edges");
  // Fudge the _last field so that the common assert will be happy.
  _last = (Node*) node->_last_del;
  DUIterator_Common::verify_resync();
}

void DUIterator_Fast::reset(const DUIterator_Fast& that) {
  assert(_outp == that._outp, "already assigned _outp");
  DUIterator_Common::reset(that);
}

// A "last" iterator walks the out array backwards from the end.
void DUIterator_Last::verify(const Node* node, bool at_end_ok) {
  // at_end_ok means the _outp is allowed to underflow by 1
  _outp += at_end_ok;
  DUIterator_Fast::verify(node, at_end_ok);  // check _del_tick, etc.
  _outp -= at_end_ok;
  assert(_outp == (node->_out + node->_outcnt) - 1, "pointer must point to end of nodes");
}

void DUIterator_Last::verify_limit() {
  // Do not require the limit address to be resynched.
  //verify(node, true);
  assert(_outp == _node->_out, "limit still correct");
}

void DUIterator_Last::verify_step(uint num_edges) {
  assert((int)num_edges > 0, "need non-zero edge count for loop progress");
  _outcnt   -= num_edges;
  _del_tick += num_edges;
  // Make sure we are still in sync, possibly with no more out-edges:
  const Node* node = _node;
  verify(node, true);
  assert(node->_last_del == _last, "must have deleted the edge just produced");
}

#endif //OPTO_DU_ITERATOR_ASSERT


#endif //ASSERT


// This constant used to initialize _out may be any non-null value.
// The value NULL is reserved for the top node only.
#define NO_OUT_ARRAY ((Node**)-1)

// This funny expression handshakes with Node::operator new
// to pull Compile::current out of the new node's _out field,
// and then calls a subroutine which manages most field
// initializations.  The only one which is tricky is the
// _idx field, which is const, and so must be initialized
// by a return value, not an assignment.
//
// (Aren't you thankful that Java finals don't require so many tricks?)
#define IDX_INIT(req)      this->Init((req), (Compile*) this->_out)
#ifdef _MSC_VER   // the IDX_INIT hack falls foul of warning C4355
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif

// Out-of-line code from node constructors.
// Executed only when extra debug info. is being passed around.
static void init_node_notes(Compile* C, int idx, Node_Notes* nn) {
  C->set_node_notes_at(idx, nn);
}

// Shared initialization code.
// Allocates this node's unique index from the Compile object (smuggled in via
// _out by operator new — see IDX_INIT above), captures any default node notes,
// and zero-initializes the edge-count and flag fields.  Returns the new index
// so the const _idx member can be set in the initializer list.
inline int Node::Init(int req, Compile* C) {
  assert(Compile::current() == C, "must use operator new(Compile*)");
  int idx = C->next_unique();

  // If there are default notes floating around, capture them:
  Node_Notes* nn = C->default_node_notes();
  if (nn != NULL)  init_node_notes(C, idx, nn);

  // Note:  At this point, C is dead,
  // and we begin to initialize the new Node.

  _cnt = _max = req;
  _outcnt = _outmax = 0;
  _class_id = Class_Node;
  _flags = 0;
  _out = NO_OUT_ARRAY;    // non-NULL sentinel; NULL is reserved for top
  return idx;
}

//------------------------------Node-------------------------------------------
// Create a Node, with a given number of required edges.
Node::Node(uint req)
  : _idx(IDX_INIT(req))
{
  assert( req < (uint)(MaxNodeLimit - NodeLimitFudgeFactor), "Input limit exceeded" );
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  if (req == 0) {
    // operator new leaves _in == this as a marker when no input array was
    // co-allocated with the node.
    assert( _in == (Node**)this, "Must not pass arg count to 'new'" );
    _in = NULL;
  } else {
    // operator new writes 'this' into the last input slot as a marker that
    // the array was sized for 'req' edges.
    assert( _in[req-1] == this, "Must pass arg count to 'new'" );
    Node** to = _in;
    for(uint i = 0; i < req; i++) {
      to[i] = NULL;
    }
  }
}

//------------------------------Node-------------------------------------------
// One-input constructor; the remaining constructors below follow the same
// pattern for 2..7 inputs: fill _in[] and add the reciprocal def-use edge
// for each non-NULL input.
Node::Node(Node *n0)
  : _idx(IDX_INIT(1))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[0] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1)
  : _idx(IDX_INIT(2))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[1] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2)
  : _idx(IDX_INIT(3))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[2] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3)
  : _idx(IDX_INIT(4))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[3] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3, Node *n4)
  : _idx(IDX_INIT(5))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[4] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  assert( is_not_dead(n4), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
  _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
           Node *n4, Node *n5)
  : _idx(IDX_INIT(6))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[5] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  assert( is_not_dead(n4), "can not use dead node");
  assert( is_not_dead(n5), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
  _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
  _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
           Node *n4, Node *n5, Node *n6)
  : _idx(IDX_INIT(7))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[6] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  assert( is_not_dead(n4), "can not use dead node");
  assert( is_not_dead(n5), "can not use dead node");
  assert( is_not_dead(n6), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
  _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
  _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
  _in[6] = n6; if (n6 != NULL) n6->add_out((Node *)this);
}


//------------------------------clone------------------------------------------
// Clone a Node.  Makes a bit-copy of the node plus a fresh input array,
// duplicates all in-edges (registering the clone as a new use of each input),
// gives the clone a new _idx, and deep-copies MachNode operands and CallNode
// JVMState where applicable.  The clone's out array starts empty.
Node *Node::clone() const {
  Compile *compile = Compile::current();
  uint s = size_of();           // Size of inherited Node
  Node *n = (Node*)compile->node_arena()->Amalloc_D(size_of() + _max*sizeof(Node*));
  Copy::conjoint_words_to_lower((HeapWord*)this, (HeapWord*)n, s);
  // Set the new input pointer array
  n->_in = (Node**)(((char*)n)+s);
  // Cannot share the old output pointer array, so kill it
  n->_out = NO_OUT_ARRAY;
  // And reset the counters to 0
  n->_outcnt = 0;
  n->_outmax = 0;
  // Unlock this guy, since he is not in any hash table.
  debug_only(n->_hash_lock = 0);
  // Walk the old node's input list to duplicate its edges
  uint i;
  for( i = 0; i < len(); i++ ) {
    Node *x = in(i);
    n->_in[i] = x;
    if (x != NULL) x->add_out(n);
  }
  if (is_macro())
    compile->add_macro_node(n);

  n->set_idx(compile->next_unique()); // Get new unique index as well
  debug_only( n->verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Do not patch over the debug_idx of a clone, because it makes it
  // impossible to break on the clone's moment of creation.
  //debug_only( n->set_debug_idx( debug_idx() ) );

  compile->copy_node_notes_to(n, (Node*) this);

  // MachNode clone
  uint nopnds;
  if (this->is_Mach() && (nopnds = this->as_Mach()->num_opnds()) > 0) {
    MachNode *mach  = n->as_Mach();
    MachNode *mthis = this->as_Mach();
    // Get address of _opnd_array.
    // It should be the same offset since it is the clone of this node.
    MachOper **from = mthis->_opnds;
    MachOper **to = (MachOper **)((size_t)(&mach->_opnds) +
                    pointer_delta((const void*)from,
                                  (const void*)(&mthis->_opnds), 1));
    mach->_opnds = to;
    for ( uint i = 0; i < nopnds; ++i ) {
      to[i] = from[i]->clone(compile);
    }
  }
  // cloning CallNode may need to clone JVMState
  if (n->is_Call()) {
    CallNode *call = n->as_Call();
    call->clone_jvms();
  }
  return n;                     // Return the clone
}

//---------------------------setup_is_top--------------------------------------
// Call this when changing the top node, to reassert the invariants
// required by Node::is_top.  See Compile::set_cached_top_node.
void Node::setup_is_top() {
  if (this == (Node*)Compile::current()->top()) {
    // This node has just become top.  Kill its out array.
    _outcnt = _outmax = 0;
    _out = NULL;                           // marker value for top
    assert(is_top(), "must be top");
  } else {
    if (_out == NULL)  _out = NO_OUT_ARRAY;
    assert(!is_top(), "must not be top");
  }
}


//------------------------------~Node------------------------------------------
// Fancy destructor; eagerly attempt to reclaim Node numberings and storage
extern int reclaim_idx ;
extern int reclaim_in  ;
extern int reclaim_node;
void Node::destruct() {
  // Eagerly reclaim unique Node numberings
  Compile* compile = Compile::current();
  if ((uint)_idx+1 == compile->unique()) {
    // This was the most recently allocated node; give its number back.
    compile->set_unique(compile->unique()-1);
#ifdef ASSERT
    reclaim_idx++;
#endif
  }
  // Clear debug info:
  Node_Notes* nn = compile->node_notes_at(_idx);
  if (nn != NULL)  nn->clear();
  // Walk the input array, freeing the corresponding output edges
  _cnt = _max;  // forget req/prec distinction
  uint i;
  for( i = 0; i < _max; i++ ) {
    set_req(i, NULL);
    //assert(def->out(def->outcnt()-1) == (Node *)this,"bad def-use hacking in reclaim");
  }
  assert(outcnt() == 0, "deleting a node must not leave a dangling use");
  // See if the input array was allocated just prior to the object
  int edge_size = _max*sizeof(void*);
  int out_edge_size = _outmax*sizeof(void*);
  char *edge_end = ((char*)_in) + edge_size;
  char *out_array = (char*)(_out == NO_OUT_ARRAY? NULL: _out);
  char *out_edge_end = out_array + out_edge_size;
  int node_size = size_of();

  // Free the output edge array
  if (out_edge_size > 0) {
#ifdef ASSERT
    if( out_edge_end == compile->node_arena()->hwm() )
      reclaim_in  += out_edge_size;  // count reclaimed out edges with in edges
#endif
    compile->node_arena()->Afree(out_array, out_edge_size);
  }

  // Free the input edge array and the node itself
  if( edge_end == (char*)this ) {
    // Input array is contiguous with the node object.
    // NOTE: in the ASSERT build we only count the reclaim; the storage is
    // deliberately NOT freed, because the node is smashed below to catch
    // use-after-destruct bugs.
#ifdef ASSERT
    if( edge_end+node_size == compile->node_arena()->hwm() ) {
      reclaim_in  += edge_size;
      reclaim_node+= node_size;
    }
#else
    // It was; free the input array and object all in one hit
    compile->node_arena()->Afree(_in,edge_size+node_size);
#endif
  } else {

    // Free just the input array
#ifdef ASSERT
    if( edge_end == compile->node_arena()->hwm() )
      reclaim_in  += edge_size;
#endif
    compile->node_arena()->Afree(_in,edge_size);

    // Free just the object
#ifdef ASSERT
    if( ((char*)this) + node_size == compile->node_arena()->hwm() )
      reclaim_node+= node_size;
#else
    compile->node_arena()->Afree(this,node_size);
#endif
  }
  if (is_macro()) {
    compile->remove_macro_node(this);
  }
#ifdef ASSERT
  // We will not actually delete the storage, but we'll make the node unusable.
  *(address*)this = badAddress;  // smash the C++ vtbl, probably
  _in = _out = (Node**) badAddress;
  _max = _cnt = _outmax = _outcnt = 0;
#endif
}

//------------------------------grow-------------------------------------------
// Grow the input array, making space for more edges.  Capacity grows in
// powers of two; newly exposed slots are NULLed.
void Node::grow( uint len ) {
  Arena* arena = Compile::current()->node_arena();
  uint new_max = _max;
  if( new_max == 0 ) {
    // First growth: start with room for four edges.
    _max = 4;
    _in = (Node**)arena->Amalloc(4*sizeof(Node*));
    Node** to = _in;
    to[0] = NULL;
    to[1] = NULL;
    to[2] = NULL;
    to[3] = NULL;
    return;
  }
  while( new_max <= len ) new_max <<= 1; // Find next power-of-2
  // Trimming to limit allows a uint8 to handle up to 255 edges.
  // Previously I was using only powers-of-2 which peaked at 128 edges.
  //if( new_max >= limit ) new_max = limit-1;
  _in = (Node**)arena->Arealloc(_in, _max*sizeof(Node*), new_max*sizeof(Node*));
  Copy::zero_to_bytes(&_in[_max], (new_max-_max)*sizeof(Node*)); // NULL all new space
  _max = new_max;               // Record new max length
  // This assertion makes sure that Node::_max is wide enough to
  // represent the numerical value of new_max.
  assert(_max == new_max && _max > len, "int width of _max is too small");
}

//-----------------------------out_grow----------------------------------------
// Grow the output (use) array, making space for more edges.
// (New slots are intentionally left uninitialized; _outcnt bounds the valid
// entries.)
void Node::out_grow( uint len ) {
  assert(!is_top(), "cannot grow a top node's out array");
  Arena* arena = Compile::current()->node_arena();
  uint new_max = _outmax;
  if( new_max == 0 ) {
    _outmax = 4;
    _out = (Node **)arena->Amalloc(4*sizeof(Node*));
    return;
  }
  while( new_max <= len ) new_max <<= 1; // Find next power-of-2
  // Trimming to limit allows a uint8 to handle up to 255 edges.
  // Previously I was using only powers-of-2 which peaked at 128 edges.
  //if( new_max >= limit ) new_max = limit-1;
  assert(_out != NULL && _out != NO_OUT_ARRAY, "out must have sensible value");
  _out = (Node**)arena->Arealloc(_out,_outmax*sizeof(Node*),new_max*sizeof(Node*));
  //Copy::zero_to_bytes(&_out[_outmax], (new_max-_outmax)*sizeof(Node*)); // NULL all new space
  _outmax = new_max;            // Record new max length
  // This assertion makes sure that Node::_max is wide enough to
  // represent the numerical value of new_max.
  assert(_outmax == new_max && _outmax > len, "int width of _outmax is too small");
}

#ifdef ASSERT
//------------------------------is_dead----------------------------------------
// Heuristic check used by assertions: a node is considered dead when all of
// its input slots are NULL (with exceptions that merely look dead).
bool Node::is_dead() const {
  // Mach and pinch point nodes may look like dead.
  if( is_top() || is_Mach() || (Opcode() == Op_Node && _outcnt > 0) )
    return false;
  for( uint i = 0; i < _max; i++ )
    if( _in[i] != NULL )
      return false;
  dump();
  return true;
}
#endif

//------------------------------add_req----------------------------------------
// Add a new required input at the end of the required inputs, sliding one
// precedence edge out of the way if necessary.
void Node::add_req( Node *n ) {
  assert( is_not_dead(n), "can not use dead node");

  // Look to see if I can move precedence down one without reallocating
  if( (_cnt >= _max) || (in(_max-1) != NULL) )
    grow( _max+1 );

  // Find a precedence edge to move
  if( in(_cnt) != NULL ) {      // Next precedence edge is busy?
    uint i;
    for( i=_cnt; i<_max; i++ )
      if( in(i) == NULL )       // Find the NULL at end of prec edge list
        break;                  // There must be one, since we grew the array
    _in[i] = in(_cnt);          // Move prec over, making space for req edge
  }
  _in[_cnt++] = n;              // Stuff over old prec edge
  if (n != NULL) n->add_out((Node *)this);
}

//---------------------------add_req_batch-------------------------------------
// Add m copies of the required input n at the end, sliding any precedence
// edges out of the way in one block move.
void Node::add_req_batch( Node *n, uint m ) {
  assert( is_not_dead(n), "can not use dead node");
  // check various edge cases
  if ((int)m <= 1) {
    assert((int)m >= 0, "oob");
    if (m != 0)  add_req(n);
    return;
  }

  // Look to see if I can move precedence down one without reallocating
  if( (_cnt+m) > _max || _in[_max-m] )
    grow( _max+m );

  // Find a precedence edge to move
  if( _in[_cnt] != NULL ) {     // Next precedence edge is busy?
    uint i;
    for( i=_cnt; i<_max; i++ )
      if( _in[i] == NULL )      // Find the NULL at end of prec edge list
        break;                  // There must be one, since we grew the array
    // Slide all the precs over by m positions (assume #prec << m).
    Copy::conjoint_words_to_higher((HeapWord*)&_in[_cnt], (HeapWord*)&_in[_cnt+m], ((i-_cnt)*sizeof(Node*)));
  }

  // Stuff over the old prec edges
  for(uint i=0; i<m; i++ ) {
    _in[_cnt++] = n;
  }

  // Insert multiple out edges on the node.
  if (n != NULL && !n->is_top()) {
    for(uint i=0; i<m; i++ ) {
      n->add_out((Node *)this);
    }
  }
}

//------------------------------del_req----------------------------------------
// Delete the required edge and compact the edge array
void Node::del_req( uint idx ) {
  assert( idx < _cnt, "oob");
  assert( !VerifyHashTableKeys || _hash_lock == 0,
          "remove node from hash table before modifying it");
  // First remove corresponding def-use edge
  Node *n = in(idx);
  if (n != NULL) n->del_out((Node *)this);
  _in[idx] = in(--_cnt);  // Compact the array
  _in[_cnt] = NULL;       // NULL out emptied slot
}

//------------------------------ins_req----------------------------------------
// Insert a new required input at position idx, shifting later inputs up.
void Node::ins_req( uint idx, Node *n ) {
  assert( is_not_dead(n), "can not use dead node");
  add_req(NULL);                // Make space
  assert( idx < _max, "Must have allocated enough space");
  // Slide over
  if(_cnt-idx-1 > 0) {
    Copy::conjoint_words_to_higher((HeapWord*)&_in[idx], (HeapWord*)&_in[idx+1], ((_cnt-idx-1)*sizeof(Node*)));
  }
  _in[idx] = n;                            // Stuff over old required edge
  if (n != NULL) n->add_out((Node *)this); // Add reciprocal def-use edge
}

//-----------------------------find_edge---------------------------------------
// Return the index of the first input (required or precedence) equal to n,
// or -1 if n is not an input.
int Node::find_edge(Node* n) {
  for (uint i = 0; i < len(); i++) {
    if (_in[i] == n)  return i;
  }
  return -1;
}

//----------------------------replace_edge-------------------------------------
// Replace every occurrence of input 'old' with 'neww', maintaining def-use
// edges; returns the number of edges replaced.
int Node::replace_edge(Node* old, Node* neww) {
  if (old == neww)  return 0;  // nothing to do
  uint nrep = 0;
  for (uint i = 0; i < len(); i++) {
    if (in(i) == old) {
      if (i < req())
        set_req(i, neww);
      else
        set_prec(i, neww);
      nrep++;
    }
  }
  return nrep;
}

//-------------------------disconnect_inputs-----------------------------------
// NULL out all inputs to eliminate incoming Def-Use edges.
// Return the number of edges between 'n' and 'this'
int Node::disconnect_inputs(Node *n) {
  int edges_to_n = 0;

  uint cnt = req();
  for( uint i = 0; i < cnt; ++i ) {
    if( in(i) == 0 ) continue;
    if( in(i) == n ) ++edges_to_n;
    set_req(i, NULL);
  }
  // Remove precedence edges if any exist
  // Note: Safepoints may have precedence edges, even during parsing
  if( (req() != len()) && (in(req()) != NULL) ) {
    uint max = len();
    for( uint i = 0; i < max; ++i ) {
      if( in(i) == 0 ) continue;
      if( in(i) == n ) ++edges_to_n;
      set_prec(i, NULL);
    }
  }

  // Node::destruct requires all out edges be deleted first
  // debug_only(destruct();) // no reuse benefit expected
  return edges_to_n;
}

//-----------------------------uncast---------------------------------------
// %%% Temporary, until we sort out CheckCastPP vs. CastPP.
// Strip away casting.  (It is depth-limited.)
Node* Node::uncast() const {
  // Should be inline:
  //return is_ConstraintCast() ? uncast_helper(this) : (Node*) this;
  if (is_ConstraintCast() || is_CheckCastPP())
    return uncast_helper(this);
  else
    return (Node*) this;
}

//---------------------------uncast_helper-------------------------------------
// Walk through up to max_depth ConstraintCast/CheckCastPP nodes, returning
// the first non-cast node found (or the last cast if the limit is hit).
Node* Node::uncast_helper(const Node* p) {
  uint max_depth = 3;           // depth limit guards against cast cycles
  for (uint i = 0; i < max_depth; i++) {
    if (p == NULL || p->req() != 2) {
      break;
    } else if (p->is_ConstraintCast()) {
      p = p->in(1);
    } else if (p->is_CheckCastPP()) {
      p = p->in(1);
    } else {
      break;
    }
  }
  return (Node*) p;
}

//------------------------------add_prec---------------------------------------
// Add a new precedence input.  Precedence inputs are unordered, with
// duplicates removed and NULLs packed down at the end.
854 void Node::add_prec( Node *n ) { 855 assert( is_not_dead(n), "can not use dead node"); 856 857 // Check for NULL at end 858 if( _cnt >= _max || in(_max-1) ) 859 grow( _max+1 ); 860 861 // Find a precedence edge to move 862 uint i = _cnt; 863 while( in(i) != NULL ) i++; 864 _in[i] = n; // Stuff prec edge over NULL 865 if ( n != NULL) n->add_out((Node *)this); // Add mirror edge 866 } 867 868 //------------------------------rm_prec---------------------------------------- 869 // Remove a precedence input. Precedence inputs are unordered, with 870 // duplicates removed and NULLs packed down at the end. 871 void Node::rm_prec( uint j ) { 872 873 // Find end of precedence list to pack NULLs 874 uint i; 875 for( i=j; i<_max; i++ ) 876 if( !_in[i] ) // Find the NULL at end of prec edge list 877 break; 878 if (_in[j] != NULL) _in[j]->del_out((Node *)this); 879 _in[j] = _in[--i]; // Move last element over removed guy 880 _in[i] = NULL; // NULL out last element 881 } 882 883 //------------------------------size_of---------------------------------------- 884 uint Node::size_of() const { return sizeof(*this); } 885 886 //------------------------------ideal_reg-------------------------------------- 887 uint Node::ideal_reg() const { return 0; } 888 889 //------------------------------jvms------------------------------------------- 890 JVMState* Node::jvms() const { return NULL; } 891 892 #ifdef ASSERT 893 //------------------------------jvms------------------------------------------- 894 bool Node::verify_jvms(const JVMState* using_jvms) const { 895 for (JVMState* jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) { 896 if (jvms == using_jvms) return true; 897 } 898 return false; 899 } 900 901 //------------------------------init_NodeProperty------------------------------ 902 void Node::init_NodeProperty() { 903 assert(_max_classes <= max_jushort, "too many NodeProperty classes"); 904 assert(_max_flags <= max_jushort, "too many NodeProperty flags"); 905 } 906 #endif 
907 908 //------------------------------format----------------------------------------- 909 // Print as assembly 910 void Node::format( PhaseRegAlloc *, outputStream *st ) const {} 911 //------------------------------emit------------------------------------------- 912 // Emit bytes starting at parameter 'ptr'. 913 void Node::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {} 914 //------------------------------size------------------------------------------- 915 // Size of instruction in bytes 916 uint Node::size(PhaseRegAlloc *ra_) const { return 0; } 917 918 //------------------------------CFG Construction------------------------------- 919 // Nodes that end basic blocks, e.g. IfTrue/IfFalse, JumpProjNode, Root, 920 // Goto and Return. 921 const Node *Node::is_block_proj() const { return 0; } 922 923 // Minimum guaranteed type 924 const Type *Node::bottom_type() const { return Type::BOTTOM; } 925 926 927 //------------------------------raise_bottom_type------------------------------ 928 // Get the worst-case Type output for this Node. 929 void Node::raise_bottom_type(const Type* new_type) { 930 if (is_Type()) { 931 TypeNode *n = this->as_Type(); 932 if (VerifyAliases) { 933 assert(new_type->higher_equal(n->type()), "new type must refine old type"); 934 } 935 n->set_type(new_type); 936 } else if (is_Load()) { 937 LoadNode *n = this->as_Load(); 938 if (VerifyAliases) { 939 assert(new_type->higher_equal(n->type()), "new type must refine old type"); 940 } 941 n->set_type(new_type); 942 } 943 } 944 945 //------------------------------Identity--------------------------------------- 946 // Return a node that the given node is equivalent to. 947 Node *Node::Identity( PhaseTransform * ) { 948 return this; // Default to no identities 949 } 950 951 //------------------------------Value------------------------------------------ 952 // Compute a new Type for a node using the Type of the inputs. 
953 const Type *Node::Value( PhaseTransform * ) const { 954 return bottom_type(); // Default to worst-case Type 955 } 956 957 //------------------------------Ideal------------------------------------------ 958 // 959 // 'Idealize' the graph rooted at this Node. 960 // 961 // In order to be efficient and flexible there are some subtle invariants 962 // these Ideal calls need to hold. Running with '+VerifyIterativeGVN' checks 963 // these invariants, although its too slow to have on by default. If you are 964 // hacking an Ideal call, be sure to test with +VerifyIterativeGVN! 965 // 966 // The Ideal call almost arbitrarily reshape the graph rooted at the 'this' 967 // pointer. If ANY change is made, it must return the root of the reshaped 968 // graph - even if the root is the same Node. Example: swapping the inputs 969 // to an AddINode gives the same answer and same root, but you still have to 970 // return the 'this' pointer instead of NULL. 971 // 972 // You cannot return an OLD Node, except for the 'this' pointer. Use the 973 // Identity call to return an old Node; basically if Identity can find 974 // another Node have the Ideal call make no change and return NULL. 975 // Example: AddINode::Ideal must check for add of zero; in this case it 976 // returns NULL instead of doing any graph reshaping. 977 // 978 // You cannot modify any old Nodes except for the 'this' pointer. Due to 979 // sharing there may be other users of the old Nodes relying on their current 980 // semantics. Modifying them will break the other users. 981 // Example: when reshape "(X+3)+4" into "X+7" you must leave the Node for 982 // "X+3" unchanged in case it is shared. 983 // 984 // If you modify the 'this' pointer's inputs, you should use 985 // 'set_req'. If you are making a new Node (either as the new root or 986 // some new internal piece) you may use 'init_req' to set the initial 987 // value. You can make a new Node with either 'new' or 'clone'. 
In 988 // either case, def-use info is correctly maintained. 989 // 990 // Example: reshape "(X+3)+4" into "X+7": 991 // set_req(1, in(1)->in(1)); 992 // set_req(2, phase->intcon(7)); 993 // return this; 994 // Example: reshape "X*4" into "X<<2" 995 // return new (C,3) LShiftINode(in(1), phase->intcon(2)); 996 // 997 // You must call 'phase->transform(X)' on any new Nodes X you make, except 998 // for the returned root node. Example: reshape "X*31" with "(X<<5)-X". 999 // Node *shift=phase->transform(new(C,3)LShiftINode(in(1),phase->intcon(5))); 1000 // return new (C,3) AddINode(shift, in(1)); 1001 // 1002 // When making a Node for a constant use 'phase->makecon' or 'phase->intcon'. 1003 // These forms are faster than 'phase->transform(new (C,1) ConNode())' and Do 1004 // The Right Thing with def-use info. 1005 // 1006 // You cannot bury the 'this' Node inside of a graph reshape. If the reshaped 1007 // graph uses the 'this' Node it must be the root. If you want a Node with 1008 // the same Opcode as the 'this' pointer use 'clone'. 1009 // 1010 Node *Node::Ideal(PhaseGVN *phase, bool can_reshape) { 1011 return NULL; // Default to being Ideal already 1012 } 1013 1014 // Some nodes have specific Ideal subgraph transformations only if they are 1015 // unique users of specific nodes. Such nodes should be put on IGVN worklist 1016 // for the transformations to happen. 1017 bool Node::has_special_unique_user() const { 1018 assert(outcnt() == 1, "match only for unique out"); 1019 Node* n = unique_out(); 1020 int op = Opcode(); 1021 if( this->is_Store() ) { 1022 // Condition for back-to-back stores folding. 
1023 return n->Opcode() == op && n->in(MemNode::Memory) == this; 1024 } else if( op == Op_AddL ) { 1025 // Condition for convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y)) 1026 return n->Opcode() == Op_ConvL2I && n->in(1) == this; 1027 } else if( op == Op_SubI || op == Op_SubL ) { 1028 // Condition for subI(x,subI(y,z)) ==> subI(addI(x,z),y) 1029 return n->Opcode() == op && n->in(2) == this; 1030 } 1031 return false; 1032 }; 1033 1034 //--------------------------find_exact_control--------------------------------- 1035 // Skip Proj and CatchProj nodes chains. Check for Null and Top. 1036 Node* Node::find_exact_control(Node* ctrl) { 1037 if (ctrl == NULL && this->is_Region()) 1038 ctrl = this->as_Region()->is_copy(); 1039 1040 if (ctrl != NULL && ctrl->is_CatchProj()) { 1041 if (ctrl->as_CatchProj()->_con == CatchProjNode::fall_through_index) 1042 ctrl = ctrl->in(0); 1043 if (ctrl != NULL && !ctrl->is_top()) 1044 ctrl = ctrl->in(0); 1045 } 1046 1047 if (ctrl != NULL && ctrl->is_Proj()) 1048 ctrl = ctrl->in(0); 1049 1050 return ctrl; 1051 } 1052 1053 //--------------------------dominates------------------------------------------ 1054 // Helper function for MemNode::all_controls_dominate(). 1055 // Check if 'this' control node dominates or equal to 'sub' control node. 1056 // We already know that if any path back to Root or Start reaches 'this', 1057 // then all paths so, so this is a simple search for one example, 1058 // not an exhaustive search for a counterexample. 1059 bool Node::dominates(Node* sub, Node_List &nlist) { 1060 assert(this->is_CFG(), "expecting control"); 1061 assert(sub != NULL && sub->is_CFG(), "expecting control"); 1062 1063 // detect dead cycle without regions 1064 int iterations_without_region_limit = DominatorSearchLimit; 1065 1066 Node* orig_sub = sub; 1067 Node* dom = this; 1068 bool met_dom = false; 1069 nlist.clear(); 1070 1071 // Walk 'sub' backward up the chain to 'dom', watching for regions. 
1072 // After seeing 'dom', continue up to Root or Start. 1073 // If we hit a region (backward split point), it may be a loop head. 1074 // Keep going through one of the region's inputs. If we reach the 1075 // same region again, go through a different input. Eventually we 1076 // will either exit through the loop head, or give up. 1077 // (If we get confused, break out and return a conservative 'false'.) 1078 while (sub != NULL) { 1079 if (sub->is_top()) break; // Conservative answer for dead code. 1080 if (sub == dom) { 1081 if (nlist.size() == 0) { 1082 // No Region nodes except loops were visited before and the EntryControl 1083 // path was taken for loops: it did not walk in a cycle. 1084 return true; 1085 } else if (met_dom) { 1086 break; // already met before: walk in a cycle 1087 } else { 1088 // Region nodes were visited. Continue walk up to Start or Root 1089 // to make sure that it did not walk in a cycle. 1090 met_dom = true; // first time meet 1091 iterations_without_region_limit = DominatorSearchLimit; // Reset 1092 } 1093 } 1094 if (sub->is_Start() || sub->is_Root()) { 1095 // Success if we met 'dom' along a path to Start or Root. 1096 // We assume there are no alternative paths that avoid 'dom'. 1097 // (This assumption is up to the caller to ensure!) 1098 return met_dom; 1099 } 1100 Node* up = sub->in(0); 1101 // Normalize simple pass-through regions and projections: 1102 up = sub->find_exact_control(up); 1103 // If sub == up, we found a self-loop. Try to push past it. 1104 if (sub == up && sub->is_Loop()) { 1105 // Take loop entry path on the way up to 'dom'. 1106 up = sub->in(1); // in(LoopNode::EntryControl); 1107 } else if (sub == up && sub->is_Region() && sub->req() != 3) { 1108 // Always take in(1) path on the way up to 'dom' for clone regions 1109 // (with only one input) or regions which merge > 2 paths 1110 // (usually used to merge fast/slow paths). 
1111 up = sub->in(1); 1112 } else if (sub == up && sub->is_Region()) { 1113 // Try both paths for Regions with 2 input paths (it may be a loop head). 1114 // It could give conservative 'false' answer without information 1115 // which region's input is the entry path. 1116 iterations_without_region_limit = DominatorSearchLimit; // Reset 1117 1118 bool region_was_visited_before = false; 1119 // Was this Region node visited before? 1120 // If so, we have reached it because we accidentally took a 1121 // loop-back edge from 'sub' back into the body of the loop, 1122 // and worked our way up again to the loop header 'sub'. 1123 // So, take the first unexplored path on the way up to 'dom'. 1124 for (int j = nlist.size() - 1; j >= 0; j--) { 1125 intptr_t ni = (intptr_t)nlist.at(j); 1126 Node* visited = (Node*)(ni & ~1); 1127 bool visited_twice_already = ((ni & 1) != 0); 1128 if (visited == sub) { 1129 if (visited_twice_already) { 1130 // Visited 2 paths, but still stuck in loop body. Give up. 1131 return false; 1132 } 1133 // The Region node was visited before only once. 1134 // (We will repush with the low bit set, below.) 1135 nlist.remove(j); 1136 // We will find a new edge and re-insert. 1137 region_was_visited_before = true; 1138 break; 1139 } 1140 } 1141 1142 // Find an incoming edge which has not been seen yet; walk through it. 1143 assert(up == sub, ""); 1144 uint skip = region_was_visited_before ? 1 : 0; 1145 for (uint i = 1; i < sub->req(); i++) { 1146 Node* in = sub->in(i); 1147 if (in != NULL && !in->is_top() && in != sub) { 1148 if (skip == 0) { 1149 up = in; 1150 break; 1151 } 1152 --skip; // skip this nontrivial input 1153 } 1154 } 1155 1156 // Set 0 bit to indicate that both paths were taken. 1157 nlist.push((Node*)((intptr_t)sub + (region_was_visited_before ? 
1 : 0))); 1158 } 1159 1160 if (up == sub) { 1161 break; // some kind of tight cycle 1162 } 1163 if (up == orig_sub && met_dom) { 1164 // returned back after visiting 'dom' 1165 break; // some kind of cycle 1166 } 1167 if (--iterations_without_region_limit < 0) { 1168 break; // dead cycle 1169 } 1170 sub = up; 1171 } 1172 1173 // Did not meet Root or Start node in pred. chain. 1174 // Conservative answer for dead code. 1175 return false; 1176 } 1177 1178 //------------------------------remove_dead_region----------------------------- 1179 // This control node is dead. Follow the subgraph below it making everything 1180 // using it dead as well. This will happen normally via the usual IterGVN 1181 // worklist but this call is more efficient. Do not update use-def info 1182 // inside the dead region, just at the borders. 1183 static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) { 1184 // Con's are a popular node to re-hit in the hash table again. 1185 if( dead->is_Con() ) return; 1186 1187 // Can't put ResourceMark here since igvn->_worklist uses the same arena 1188 // for verify pass with +VerifyOpto and we add/remove elements in it here. 1189 Node_List nstack(Thread::current()->resource_area()); 1190 1191 Node *top = igvn->C->top(); 1192 nstack.push(dead); 1193 1194 while (nstack.size() > 0) { 1195 dead = nstack.pop(); 1196 if (dead->outcnt() > 0) { 1197 // Keep dead node on stack until all uses are processed. 1198 nstack.push(dead); 1199 // For all Users of the Dead... ;-) 1200 for (DUIterator_Last kmin, k = dead->last_outs(kmin); k >= kmin; ) { 1201 Node* use = dead->last_out(k); 1202 igvn->hash_delete(use); // Yank from hash table prior to mod 1203 if (use->in(0) == dead) { // Found another dead node 1204 assert (!use->is_Con(), "Control for Con node should be Root node."); 1205 use->set_req(0, top); // Cut dead edge to prevent processing 1206 nstack.push(use); // the dead node again. 
1207 } else { // Else found a not-dead user 1208 for (uint j = 1; j < use->req(); j++) { 1209 if (use->in(j) == dead) { // Turn all dead inputs into TOP 1210 use->set_req(j, top); 1211 } 1212 } 1213 igvn->_worklist.push(use); 1214 } 1215 // Refresh the iterator, since any number of kills might have happened. 1216 k = dead->last_outs(kmin); 1217 } 1218 } else { // (dead->outcnt() == 0) 1219 // Done with outputs. 1220 igvn->hash_delete(dead); 1221 igvn->_worklist.remove(dead); 1222 igvn->set_type(dead, Type::TOP); 1223 if (dead->is_macro()) { 1224 igvn->C->remove_macro_node(dead); 1225 } 1226 // Kill all inputs to the dead guy 1227 for (uint i=0; i < dead->req(); i++) { 1228 Node *n = dead->in(i); // Get input to dead guy 1229 if (n != NULL && !n->is_top()) { // Input is valid? 1230 dead->set_req(i, top); // Smash input away 1231 if (n->outcnt() == 0) { // Input also goes dead? 1232 if (!n->is_Con()) 1233 nstack.push(n); // Clear it out as well 1234 } else if (n->outcnt() == 1 && 1235 n->has_special_unique_user()) { 1236 igvn->add_users_to_worklist( n ); 1237 } else if (n->outcnt() <= 2 && n->is_Store()) { 1238 // Push store's uses on worklist to enable folding optimization for 1239 // store/store and store/load to the same address. 1240 // The restriction (outcnt() <= 2) is the same as in set_req_X() 1241 // and remove_globally_dead_node(). 1242 igvn->add_users_to_worklist( n ); 1243 } 1244 } 1245 } 1246 } // (dead->outcnt() == 0) 1247 } // while (nstack.size() > 0) for outputs 1248 return; 1249 } 1250 1251 //------------------------------remove_dead_region----------------------------- 1252 bool Node::remove_dead_region(PhaseGVN *phase, bool can_reshape) { 1253 Node *n = in(0); 1254 if( !n ) return false; 1255 // Lost control into this guy? I.e., it became unreachable? 1256 // Aggressively kill all unreachable code. 1257 if (can_reshape && n->is_top()) { 1258 kill_dead_code(this, phase->is_IterGVN()); 1259 return false; // Node is dead. 
1260 } 1261 1262 if( n->is_Region() && n->as_Region()->is_copy() ) { 1263 Node *m = n->nonnull_req(); 1264 set_req(0, m); 1265 return true; 1266 } 1267 return false; 1268 } 1269 1270 //------------------------------Ideal_DU_postCCP------------------------------- 1271 // Idealize graph, using DU info. Must clone result into new-space 1272 Node *Node::Ideal_DU_postCCP( PhaseCCP * ) { 1273 return NULL; // Default to no change 1274 } 1275 1276 //------------------------------hash------------------------------------------- 1277 // Hash function over Nodes. 1278 uint Node::hash() const { 1279 uint sum = 0; 1280 for( uint i=0; i<_cnt; i++ ) // Add in all inputs 1281 sum = (sum<<1)-(uintptr_t)in(i); // Ignore embedded NULLs 1282 return (sum>>2) + _cnt + Opcode(); 1283 } 1284 1285 //------------------------------cmp-------------------------------------------- 1286 // Compare special parts of simple Nodes 1287 uint Node::cmp( const Node &n ) const { 1288 return 1; // Must be same 1289 } 1290 1291 //------------------------------rematerialize----------------------------------- 1292 // Should we clone rather than spill this instruction? 1293 bool Node::rematerialize() const { 1294 if ( is_Mach() ) 1295 return this->as_Mach()->rematerialize(); 1296 else 1297 return (_flags & Flag_rematerialize) != 0; 1298 } 1299 1300 //------------------------------needs_anti_dependence_check--------------------- 1301 // Nodes which use memory without consuming it, hence need antidependences. 1302 bool Node::needs_anti_dependence_check() const { 1303 if( req() < 2 || (_flags & Flag_needs_anti_dependence_check) == 0 ) 1304 return false; 1305 else 1306 return in(1)->bottom_type()->has_memory(); 1307 } 1308 1309 1310 // Get an integer constant from a ConNode (or CastIINode). 1311 // Return a default value if there is no apparent constant here. 
1312 const TypeInt* Node::find_int_type() const { 1313 if (this->is_Type()) { 1314 return this->as_Type()->type()->isa_int(); 1315 } else if (this->is_Con()) { 1316 assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode"); 1317 return this->bottom_type()->isa_int(); 1318 } 1319 return NULL; 1320 } 1321 1322 // Get a pointer constant from a ConstNode. 1323 // Returns the constant if it is a pointer ConstNode 1324 intptr_t Node::get_ptr() const { 1325 assert( Opcode() == Op_ConP, "" ); 1326 return ((ConPNode*)this)->type()->is_ptr()->get_con(); 1327 } 1328 1329 // Get a narrow oop constant from a ConNNode. 1330 intptr_t Node::get_narrowcon() const { 1331 assert( Opcode() == Op_ConN, "" ); 1332 return ((ConNNode*)this)->type()->is_narrowoop()->get_con(); 1333 } 1334 1335 // Get a long constant from a ConNode. 1336 // Return a default value if there is no apparent constant here. 1337 const TypeLong* Node::find_long_type() const { 1338 if (this->is_Type()) { 1339 return this->as_Type()->type()->isa_long(); 1340 } else if (this->is_Con()) { 1341 assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode"); 1342 return this->bottom_type()->isa_long(); 1343 } 1344 return NULL; 1345 } 1346 1347 // Get a double constant from a ConstNode. 1348 // Returns the constant if it is a double ConstNode 1349 jdouble Node::getd() const { 1350 assert( Opcode() == Op_ConD, "" ); 1351 return ((ConDNode*)this)->type()->is_double_constant()->getd(); 1352 } 1353 1354 // Get a float constant from a ConstNode. 1355 // Returns the constant if it is a float ConstNode 1356 jfloat Node::getf() const { 1357 assert( Opcode() == Op_ConF, "" ); 1358 return ((ConFNode*)this)->type()->is_float_constant()->getf(); 1359 } 1360 1361 #ifndef PRODUCT 1362 1363 //----------------------------NotANode---------------------------------------- 1364 // Used in debugging code to avoid walking across dead or uninitialized edges. 
1365 static inline bool NotANode(const Node* n) { 1366 if (n == NULL) return true; 1367 if (((intptr_t)n & 1) != 0) return true; // uninitialized, etc. 1368 if (*(address*)n == badAddress) return true; // kill by Node::destruct 1369 return false; 1370 } 1371 1372 1373 //------------------------------find------------------------------------------ 1374 // Find a neighbor of this Node with the given _idx 1375 // If idx is negative, find its absolute value, following both _in and _out. 1376 static void find_recur(Compile* C, Node* &result, Node *n, int idx, bool only_ctrl, 1377 VectorSet* old_space, VectorSet* new_space ) { 1378 int node_idx = (idx >= 0) ? idx : -idx; 1379 if (NotANode(n)) return; // Gracefully handle NULL, -1, 0xabababab, etc. 1380 // Contained in new_space or old_space? Check old_arena first since it's mostly empty. 1381 VectorSet *v = C->old_arena()->contains(n) ? old_space : new_space; 1382 if( v->test(n->_idx) ) return; 1383 if( (int)n->_idx == node_idx 1384 debug_only(|| n->debug_idx() == node_idx) ) { 1385 if (result != NULL) 1386 tty->print("find: " INTPTR_FORMAT " and " INTPTR_FORMAT " both have idx==%d\n", 1387 (uintptr_t)result, (uintptr_t)n, node_idx); 1388 result = n; 1389 } 1390 v->set(n->_idx); 1391 for( uint i=0; i<n->len(); i++ ) { 1392 if( only_ctrl && !(n->is_Region()) && (n->Opcode() != Op_Root) && (i != TypeFunc::Control) ) continue; 1393 find_recur(C, result, n->in(i), idx, only_ctrl, old_space, new_space ); 1394 } 1395 // Search along forward edges also: 1396 if (idx < 0 && !only_ctrl) { 1397 for( uint j=0; j<n->outcnt(); j++ ) { 1398 find_recur(C, result, n->raw_out(j), idx, only_ctrl, old_space, new_space ); 1399 } 1400 } 1401 #ifdef ASSERT 1402 // Search along debug_orig edges last, checking for cycles 1403 Node* orig = n->debug_orig(); 1404 if (orig != NULL) { 1405 do { 1406 if (NotANode(orig)) break; 1407 find_recur(C, result, orig, idx, only_ctrl, old_space, new_space ); 1408 orig = orig->debug_orig(); 1409 } while (orig != 
NULL && orig != n->debug_orig()); 1410 } 1411 #endif //ASSERT 1412 } 1413 1414 // call this from debugger: 1415 Node* find_node(Node* n, int idx) { 1416 return n->find(idx); 1417 } 1418 1419 //------------------------------find------------------------------------------- 1420 Node* Node::find(int idx) const { 1421 ResourceArea *area = Thread::current()->resource_area(); 1422 VectorSet old_space(area), new_space(area); 1423 Node* result = NULL; 1424 find_recur(Compile::current(), result, (Node*) this, idx, false, &old_space, &new_space ); 1425 return result; 1426 } 1427 1428 //------------------------------find_ctrl-------------------------------------- 1429 // Find an ancestor to this node in the control history with given _idx 1430 Node* Node::find_ctrl(int idx) const { 1431 ResourceArea *area = Thread::current()->resource_area(); 1432 VectorSet old_space(area), new_space(area); 1433 Node* result = NULL; 1434 find_recur(Compile::current(), result, (Node*) this, idx, true, &old_space, &new_space ); 1435 return result; 1436 } 1437 #endif 1438 1439 1440 1441 #ifndef PRODUCT 1442 int Node::_in_dump_cnt = 0; 1443 1444 // -----------------------------Name------------------------------------------- 1445 extern const char *NodeClassNames[]; 1446 const char *Node::Name() const { return NodeClassNames[Opcode()]; } 1447 1448 static bool is_disconnected(const Node* n) { 1449 for (uint i = 0; i < n->req(); i++) { 1450 if (n->in(i) != NULL) return false; 1451 } 1452 return true; 1453 } 1454 1455 #ifdef ASSERT 1456 static void dump_orig(Node* orig) { 1457 Compile* C = Compile::current(); 1458 if (NotANode(orig)) orig = NULL; 1459 if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL; 1460 if (orig == NULL) return; 1461 tty->print(" !orig="); 1462 Node* fast = orig->debug_orig(); // tortoise & hare algorithm to detect loops 1463 if (NotANode(fast)) fast = NULL; 1464 while (orig != NULL) { 1465 bool discon = is_disconnected(orig); // if discon, print [123] else 123 1466 
if (discon) tty->print("["); 1467 if (!Compile::current()->node_arena()->contains(orig)) 1468 tty->print("o"); 1469 tty->print("%d", orig->_idx); 1470 if (discon) tty->print("]"); 1471 orig = orig->debug_orig(); 1472 if (NotANode(orig)) orig = NULL; 1473 if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL; 1474 if (orig != NULL) tty->print(","); 1475 if (fast != NULL) { 1476 // Step fast twice for each single step of orig: 1477 fast = fast->debug_orig(); 1478 if (NotANode(fast)) fast = NULL; 1479 if (fast != NULL && fast != orig) { 1480 fast = fast->debug_orig(); 1481 if (NotANode(fast)) fast = NULL; 1482 } 1483 if (fast == orig) { 1484 tty->print("..."); 1485 break; 1486 } 1487 } 1488 } 1489 } 1490 1491 void Node::set_debug_orig(Node* orig) { 1492 _debug_orig = orig; 1493 if (BreakAtNode == 0) return; 1494 if (NotANode(orig)) orig = NULL; 1495 int trip = 10; 1496 while (orig != NULL) { 1497 if (orig->debug_idx() == BreakAtNode || (int)orig->_idx == BreakAtNode) { 1498 tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d orig._idx=%d orig._debug_idx=%d", 1499 this->_idx, this->debug_idx(), orig->_idx, orig->debug_idx()); 1500 BREAKPOINT; 1501 } 1502 orig = orig->debug_orig(); 1503 if (NotANode(orig)) orig = NULL; 1504 if (trip-- <= 0) break; 1505 } 1506 } 1507 #endif //ASSERT 1508 1509 //------------------------------dump------------------------------------------ 1510 // Dump a Node 1511 void Node::dump() const { 1512 Compile* C = Compile::current(); 1513 bool is_new = C->node_arena()->contains(this); 1514 _in_dump_cnt++; 1515 tty->print("%c%d\t%s\t=== ", 1516 is_new ? 
' ' : 'o', _idx, Name()); 1517 1518 // Dump the required and precedence inputs 1519 dump_req(); 1520 dump_prec(); 1521 // Dump the outputs 1522 dump_out(); 1523 1524 if (is_disconnected(this)) { 1525 #ifdef ASSERT 1526 tty->print(" [%d]",debug_idx()); 1527 dump_orig(debug_orig()); 1528 #endif 1529 tty->cr(); 1530 _in_dump_cnt--; 1531 return; // don't process dead nodes 1532 } 1533 1534 // Dump node-specific info 1535 dump_spec(tty); 1536 #ifdef ASSERT 1537 // Dump the non-reset _debug_idx 1538 if( Verbose && WizardMode ) { 1539 tty->print(" [%d]",debug_idx()); 1540 } 1541 #endif 1542 1543 const Type *t = bottom_type(); 1544 1545 if (t != NULL && (t->isa_instptr() || t->isa_klassptr())) { 1546 const TypeInstPtr *toop = t->isa_instptr(); 1547 const TypeKlassPtr *tkls = t->isa_klassptr(); 1548 ciKlass* klass = toop ? toop->klass() : (tkls ? tkls->klass() : NULL ); 1549 if( klass && klass->is_loaded() && klass->is_interface() ) { 1550 tty->print(" Interface:"); 1551 } else if( toop ) { 1552 tty->print(" Oop:"); 1553 } else if( tkls ) { 1554 tty->print(" Klass:"); 1555 } 1556 t->dump(); 1557 } else if( t == Type::MEMORY ) { 1558 tty->print(" Memory:"); 1559 MemNode::dump_adr_type(this, adr_type(), tty); 1560 } else if( Verbose || WizardMode ) { 1561 tty->print(" Type:"); 1562 if( t ) { 1563 t->dump(); 1564 } else { 1565 tty->print("no type"); 1566 } 1567 } 1568 if (is_new) { 1569 debug_only(dump_orig(debug_orig())); 1570 Node_Notes* nn = C->node_notes_at(_idx); 1571 if (nn != NULL && !nn->is_clear()) { 1572 if (nn->jvms() != NULL) { 1573 tty->print(" !jvms:"); 1574 nn->jvms()->dump_spec(tty); 1575 } 1576 } 1577 } 1578 tty->cr(); 1579 _in_dump_cnt--; 1580 } 1581 1582 //------------------------------dump_req-------------------------------------- 1583 void Node::dump_req() const { 1584 // Dump the required input edges 1585 for (uint i = 0; i < req(); i++) { // For all required inputs 1586 Node* d = in(i); 1587 if (d == NULL) { 1588 tty->print("_ "); 1589 } else if 
(NotANode(d)) {
      tty->print("NotANode ");  // uninitialized, sentinel, garbage, etc.
    } else {
      // Leading 'o' flags a node living outside the current compile's node arena.
      tty->print("%c%d ", Compile::current()->node_arena()->contains(d) ? ' ' : 'o', d->_idx);
    }
  }
}


//------------------------------dump_prec-------------------------------------
// Print this node's precedence edges (input slots at index >= req()),
// preceded by a " |" separator the first time one is found.
void Node::dump_prec() const {
  // Dump the precedence edges
  int any_prec = 0;
  for (uint i = req(); i < len(); i++) {       // For all precedence inputs
    Node* p = in(i);
    if (p != NULL) {
      if( !any_prec++ ) tty->print(" |");      // separator before the first precedence edge only
      if (NotANode(p)) { tty->print("NotANode "); continue; }
      // 'o' prefix marks a node that is not in the current node arena.
      tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    }
  }
}

//------------------------------dump_out--------------------------------------
// Print this node's def-use (output) edges, bracketed by "[[" and "]]".
void Node::dump_out() const {
  // Delimit the output edges
  tty->print(" [[");
  // Dump the output edges
  for (uint i = 0; i < _outcnt; i++) {    // For all outputs
    Node* u = _out[i];
    if (u == NULL) {
      tty->print("_ ");
    } else if (NotANode(u)) {
      tty->print("NotANode ");            // uninitialized, sentinel, garbage, etc.
    } else {
      // 'o' prefix marks a node that is not in the current node arena.
      tty->print("%c%d ", Compile::current()->node_arena()->contains(u) ? ' ' : 'o', u->_idx);
    }
  }
  tty->print("]] ");
}

//------------------------------dump_nodes-------------------------------------
// Breadth-first dump of the graph around 'start'.  The sign of 'd' selects
// the direction (positive: follow inputs/defs, negative: follow outputs/uses)
// and its magnitude bounds the traversal depth.  When 'only_ctrl' is set,
// only CFG nodes are followed.  Inputs are printed defs-first (reverse of
// discovery order); outputs are printed in discovery order.
static void dump_nodes(const Node* start, int d, bool only_ctrl) {
  Node* s = (Node*)start; // remove const
  if (NotANode(s)) return;

  uint depth = (uint)ABS(d);
  int direction = d;
  Compile* C = Compile::current();
  // Worklist doubles as the visited set (checked via nstack.contains below).
  GrowableArray <Node *> nstack(C->unique());

  nstack.append(s);
  int begin = 0;
  int end = 0;
  // Each pass expands one BFS level: [begin, end) is the frontier.
  for(uint i = 0; i < depth; i++) {
    end = nstack.length();
    for(int j = begin; j < end; j++) {
      Node* tp = nstack.at(j);
      uint limit = direction > 0 ? tp->len() : tp->outcnt();
      for(uint k = 0; k < limit; k++) {
        Node* n = direction > 0 ? tp->in(k) : tp->raw_out(k);

        if (NotANode(n))  continue;
        // do not recurse through top or the root (would reach unrelated stuff)
        if (n->is_Root() || n->is_top())  continue;
        if (only_ctrl && !n->is_CFG()) continue;

        bool on_stack = nstack.contains(n);   // linear membership test; fine for debug dumps
        if (!on_stack) {
          nstack.append(n);
        }
      }
    }
    begin = end;
  }
  end = nstack.length();
  if (direction > 0) {
    // Input walk: print deepest defs first so 'start' comes out last.
    for(int j = end-1; j >= 0; j--) {
      nstack.at(j)->dump();
    }
  } else {
    // Output walk: print 'start' first, then its uses level by level.
    for(int j = 0; j < end; j++) {
      nstack.at(j)->dump();
    }
  }
}

//------------------------------dump-------------------------------------------
// Dump this node and its neighborhood to depth |d|; d > 0 walks inputs,
// d < 0 walks outputs.
void Node::dump(int d) const {
  dump_nodes(this, d, false);
}

//------------------------------dump_ctrl--------------------------------------
// Dump a Node's control history to depth
void Node::dump_ctrl(int d) const {
  dump_nodes(this, d, true);
}

// VERIFICATION CODE
// For each input edge to a node (ie - for each Use-Def edge), verify that
// there is a corresponding Def-Use edge.
//------------------------------verify_edges-----------------------------------
void Node::verify_edges(Unique_Node_List &visited) {
  uint i, j, idx;
  int cnt;
  Node *n;

  // Recursive termination test
  if (visited.member(this)) return;
  visited.push(this);

  // Walk over all input edges, checking for correspondence
  for( i = 0; i < len(); i++ ) {
    n = in(i);
    if (n != NULL && !n->is_top()) {
      // Count instances of (Node *)this
      cnt = 0;
      for (idx = 0; idx < n->_outcnt; idx++ ) {
        if (n->_out[idx] == (Node *)this) cnt++;
      }
      assert( cnt > 0,"Failed to find Def-Use edge." );
      // Check for duplicate edges
      // walk the input array downcounting the input edges to n
      // (the out-edge count must match the number of in-slots holding n)
      for( j = 0; j < len(); j++ ) {
        if( in(j) == n ) cnt--;
      }
      assert( cnt == 0,"Mismatched edge count.");
    } else if (n == NULL) {
      assert(i >= req() || i == 0 || is_Region() || is_Phi(), "only regions or phis have null data edges");
    } else {
      assert(n->is_top(), "sanity");
      // Nothing to check.
    }
  }
  // Recursive walk over all input edges
  for( i = 0; i < len(); i++ ) {
    n = in(i);
    if( n != NULL )
      in(i)->verify_edges(visited);
  }
}

//------------------------------verify_recur-----------------------------------
// NOTE(review): 'unique_top' is not referenced in this chunk; presumably
// historical — confirm before removing.
static const Node *unique_top = NULL;

// Recursively verify def-use consistency from 'n' to 'verify_depth' levels
// (verify_depth < 0 means unlimited: the counter never reaches zero).
// Visited nodes are tracked in two VectorSets because node numberings are
// only unique within one arena (see below).
void Node::verify_recur(const Node *n, int verify_depth,
                        VectorSet &old_space, VectorSet &new_space) {
  if ( verify_depth == 0 ) return;
  if (verify_depth > 0) --verify_depth;

  Compile* C = Compile::current();

  // Contained in new_space or old_space?
  VectorSet *v = C->node_arena()->contains(n) ? &new_space : &old_space;
  // Check for visited in the proper space. Numberings are not unique
  // across spaces so we need a separate VectorSet for each space.
  if( v->test_set(n->_idx) ) return;

  // The TOP constant node must be globally unique; cache it on first sight.
  if (n->is_Con() && n->bottom_type() == Type::TOP) {
    if (C->cached_top_node() == NULL)
      C->set_cached_top_node((Node*)n);
    assert(C->cached_top_node() == n, "TOP node must be unique");
  }

  for( uint i = 0; i < n->len(); i++ ) {
    Node *x = n->in(i);
    if (!x || x->is_top()) continue;

    // Verify my input has a def-use edge to me
    if (true /*VerifyDefUse*/) {
      // Count use-def edges from n to x
      int cnt = 0;
      for( uint j = 0; j < n->len(); j++ )
        if( n->in(j) == x )
          cnt++;
      // Count def-use edges from x to n
      uint max = x->_outcnt;
      for( uint k = 0; k < max; k++ )
        if (x->_out[k] == n)
          cnt--;
      assert( cnt == 0, "mismatched def-use edge counts" );
    }

    verify_recur(x, verify_depth, old_space, new_space);
  }

}

//------------------------------verify-----------------------------------------
// Check Def-Use info for my subgraph
void Node::verify() const {
  Compile* C = Compile::current();
  // verify_recur may cache the TOP node; save and restore the previous value
  // so verification has no lasting side effect on the Compile object.
  Node* old_top = C->cached_top_node();
  ResourceMark rm;
  ResourceArea *area = Thread::current()->resource_area();
  VectorSet old_space(area), new_space(area);
  verify_recur(this, -1, old_space, new_space);   // -1: unlimited depth
  C->set_cached_top_node(old_top);
}
#endif


//------------------------------walk-------------------------------------------
// Graph walk, with both pre-order and post-order functions
void Node::walk(NFunc pre, NFunc post, void *env) {
  VectorSet visited(Thread::current()->resource_area()); // Setup for local walk
  walk_(pre, post, env, visited);
}

// Recursive helper for walk(): depth-first over all input slots (including
// precedence edges, since the loop runs to _max), calling 'pre' before and
// 'post' after visiting a node's inputs.
void Node::walk_(NFunc pre, NFunc post, void *env, VectorSet &visited) {
  if( visited.test_set(_idx) ) return;
  pre(*this,env);               // Call the pre-order walk function
  for( uint i=0; i<_max; i++ )
    if( in(i) )                 // Input exists and is not walked?
      in(i)->walk_(pre,post,env,visited); // Walk it with pre & post functions
  post(*this,env);              // Call the post-order walk function
}

// Do-nothing NFunc, usable as 'pre' or 'post' in walk().
void Node::nop(Node &, void*) {}

//------------------------------Registers--------------------------------------
// Do we Match on this edge index or not? Generally false for Control
// and true for everything else. Weird for calls & returns.
uint Node::match_edge(uint idx) const {
  return idx; // True for other than index 0 (control)
}

// Register classes are defined for specific machines
const RegMask &Node::out_RegMask() const {
  ShouldNotCallThis();
  // Unreachable after ShouldNotCallThis(); the allocation only placates the
  // return type and never actually leaks at runtime.
  return *(new RegMask());
}

const RegMask &Node::in_RegMask(uint) const {
  ShouldNotCallThis();
  // Unreachable; see out_RegMask above.
  return *(new RegMask());
}

//=============================================================================
//-----------------------------------------------------------------------------
// Release this array's storage back to its arena and switch to 'new_arena'
// with an empty (zero-capacity) array.
void Node_Array::reset( Arena *new_arena ) {
  _a->Afree(_nodes,_max*sizeof(Node*));
  _max = 0;
  _nodes = NULL;
  _a = new_arena;
}

//------------------------------clear------------------------------------------
// Clear all entries in _nodes to NULL but keep storage
void Node_Array::clear() {
  Copy::zero_to_bytes( _nodes, _max*sizeof(Node*) );
}

//-----------------------------------------------------------------------------
// Grow capacity (doubling) until index 'i' fits; new slots are NULL-filled.
void Node_Array::grow( uint i ) {
  if( !_max ) {
    _max = 1;
    _nodes = (Node**)_a->Amalloc( _max * sizeof(Node*) );
    _nodes[0] = NULL;
  }
  uint old = _max;
  while( i >= _max ) _max <<= 1; // Double to fit
  _nodes = (Node**)_a->Arealloc( _nodes, old*sizeof(Node*),_max*sizeof(Node*));
  Copy::zero_to_bytes( &_nodes[old], (_max-old)*sizeof(Node*) );
}

//-----------------------------------------------------------------------------
// Insert 'n' at slot 'i', shifting later entries up one slot.  Grows first if
// the final slot is occupied; otherwise the last entry is shifted away
// (the array is treated as NULL-terminated at capacity).
void Node_Array::insert( uint i, Node *n ) {
  if( _nodes[_max-1] ) grow(_max); // Get more space if full
  Copy::conjoint_words_to_higher((HeapWord*)&_nodes[i], (HeapWord*)&_nodes[i+1], ((_max-i-1)*sizeof(Node*)));
  _nodes[i] = n;
}

//-----------------------------------------------------------------------------
// Remove slot 'i', shifting later entries down one slot and NULLing the last.
void Node_Array::remove( uint i ) {
  Copy::conjoint_words_to_lower((HeapWord*)&_nodes[i+1], (HeapWord*)&_nodes[i], ((_max-i-1)*sizeof(Node*)));
  _nodes[_max-1] = NULL;
}

//-----------------------------------------------------------------------------
// Sort the whole backing array (including NULL slots) with a C comparator.
void Node_Array::sort( C_sort_func_t func) {
  qsort( _nodes, _max, sizeof( Node* ), func );
}

//-----------------------------------------------------------------------------
// Debug print: dump every non-NULL entry with its index.
void Node_Array::dump() const {
#ifndef PRODUCT
  for( uint i = 0; i < _max; i++ ) {
    Node *nn = _nodes[i];
    if( nn != NULL ) {
      tty->print("%5d--> ",i); nn->dump();
    }
  }
#endif
}

//--------------------------is_iteratively_computed------------------------------
// Operation appears to be iteratively computed (such as an induction variable)
// It is possible for this operation to return false for a loop-varying
// value, if it appears (by local graph inspection) to be computed by a simple conditional.
bool Node::is_iteratively_computed() {
  if (ideal_reg()) { // does operation have a result register?
    // Look for a cycle: some Phi input of this node takes this node back
    // as one of its own inputs.
    for (uint i = 1; i < req(); i++) {
      Node* n = in(i);
      if (n != NULL && n->is_Phi()) {
        for (uint j = 1; j < n->req(); j++) {
          if (n->in(j) == this) {
            return true;
          }
        }
      }
    }
  }
  return false;
}

//--------------------------find_similar------------------------------
// Return a node with opcode "opc" and same inputs as "this" if one can
// be found; Otherwise return NULL;
// Candidates are discovered by scanning the uses of this node's first
// real input (in(1)), since any node with identical inputs must appear there.
Node* Node::find_similar(int opc) {
  if (req() >= 2) {
    Node* def = in(1);
    if (def && def->outcnt() >= 2) {     // need at least one use besides 'this'
      for (DUIterator_Fast dmax, i = def->fast_outs(dmax); i < dmax; i++) {
        Node* use = def->fast_out(i);
        if (use->Opcode() == opc &&
            use->req() == req()) {
          uint j;
          for (j = 0; j < use->req(); j++) {
            if (use->in(j) != in(j)) {
              break;
            }
          }
          if (j == use->req()) {         // all inputs matched
            return use;
          }
        }
      }
    }
  }
  return NULL;
}


//--------------------------unique_ctrl_out------------------------------
// Return the unique control out if only one. Null if none or more than one.
// Return this node's single CFG use, or NULL if it has none or several.
Node* Node::unique_ctrl_out() {
  Node* found = NULL;
  for (uint i = 0; i < outcnt(); i++) {
    Node* use = raw_out(i);
    if (use->is_CFG() && use != this) {     // ignore self-loops
      if (found != NULL) return NULL;       // second CFG use => not unique
      found = use;
    }
  }
  return found;
}

//=============================================================================
//------------------------------yank-------------------------------------------
// Find and remove
// (order-destroying removal: the last element takes the removed slot)
void Node_List::yank( Node *n ) {
  uint i;
  for( i = 0; i < _cnt; i++ )
    if( _nodes[i] == n )
      break;

  if( i < _cnt )
    _nodes[i] = _nodes[--_cnt];
}

//------------------------------dump-------------------------------------------
// Debug print: dump every non-NULL entry with its index.
void Node_List::dump() const {
#ifndef PRODUCT
  for( uint i = 0; i < _cnt; i++ )
    if( _nodes[i] ) {
      tty->print("%5d--> ",i);
      _nodes[i]->dump();
    }
#endif
}

//=============================================================================
//------------------------------remove-----------------------------------------
// Remove 'n' from the worklist if present: overwrite its slot with the popped
// last element and drop it from the membership bitset.  The bitset says 'n'
// is present, so failing to find it in the list is a structural error.
void Unique_Node_List::remove( Node *n ) {
  if( _in_worklist[n->_idx] ) {
    for( uint i = 0; i < size(); i++ )
      if( _nodes[i] == n ) {
        map(i,Node_List::pop());
        _in_worklist >>= n->_idx;   // clear n's membership bit
        return;
      }
    ShouldNotReachHere();
  }
}

//-----------------------remove_useless_nodes----------------------------------
// Remove useless nodes from worklist
void Unique_Node_List::remove_useless_nodes(VectorSet &useful) {

  for( uint i = 0; i < size(); ++i ) {
    Node *n = at(i);
    assert( n != NULL, "Did not expect null entries in worklist");
    if( !useful.test(n->_idx) ) {
      _in_worklist >>= n->_idx;     // clear membership bit
      // pop() shrinks the list and map() puts the popped entry into slot i
      map(i,Node_List::pop());
      // Node *replacement = Node_List::pop();
      // if( i != size() ) { // Check if removing last entry
      //   _nodes[i] = replacement;
      // }
      --i; // Visit popped node
      // If it was last entry, loop terminates since size() was also reduced
    }
  }
}

//=============================================================================
// Double the stack's capacity, preserving the current top-of-stack position.
void Node_Stack::grow() {
  size_t old_top = pointer_delta(_inode_top,_inodes,sizeof(INode)); // save _top
  size_t old_max = pointer_delta(_inode_max,_inodes,sizeof(INode));
  size_t max = old_max << 1; // max * 2
  _inodes = REALLOC_ARENA_ARRAY(_a, INode, _inodes, old_max, max);
  _inode_max = _inodes + max;
  _inode_top = _inodes + old_top; // restore _top
}

//=============================================================================
uint TypeNode::size_of() const { return sizeof(*this); }
#ifndef PRODUCT
// Append "#<type>" to the dump unless Verbose/WizardMode already print it.
void TypeNode::dump_spec(outputStream *st) const {
  if( !Verbose && !WizardMode ) {
    // standard dump does this in Verbose and WizardMode
    st->print(" #"); _type->dump_on(st);
  }
}
#endif
// Hash and compare include the attached type, so value-numbering only merges
// TypeNodes carrying identical types.
uint TypeNode::hash() const {
  return Node::hash() + _type->hash();
}
// Type::cmp returns 0 on equality, hence the negation for Node::cmp's
// "nonzero means equal" convention.
uint TypeNode::cmp( const Node &n ) const
{ return !Type::cmp( _type, ((TypeNode&)n)._type ); }
const Type *TypeNode::bottom_type() const { return _type; }
const Type *TypeNode::Value( PhaseTransform * ) const { return _type; }

//------------------------------ideal_reg--------------------------------------
// Map the attached type's basic type to its ideal register class.
uint TypeNode::ideal_reg() const {
  return Matcher::base2reg[_type->base()];
}