1 /* 2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "libadt/vectset.hpp" 27 #include "memory/allocation.inline.hpp" 28 #include "opto/cfgnode.hpp" 29 #include "opto/connode.hpp" 30 #include "opto/loopnode.hpp" 31 #include "opto/machnode.hpp" 32 #include "opto/matcher.hpp" 33 #include "opto/node.hpp" 34 #include "opto/opcodes.hpp" 35 #include "opto/regmask.hpp" 36 #include "opto/type.hpp" 37 #include "utilities/copy.hpp" 38 39 class RegMask; 40 // #include "phase.hpp" 41 class PhaseTransform; 42 class PhaseGVN; 43 44 // Arena we are currently building Nodes in 45 const uint Node::NotAMachineReg = 0xffff0000; 46 47 #ifndef PRODUCT 48 extern int nodes_created; 49 #endif 50 51 #ifdef ASSERT 52 53 //-------------------------- construct_node------------------------------------ 54 // Set a breakpoint here to identify where a particular node index is built. 
// Debug-build bookkeeping run from every Node constructor: assigns a
// monotonically increasing _debug_idx (whose low five decimal digits mirror
// _idx), checks the node-count limits, and honors the BreakAtNode flag.
void Node::verify_construction() {
  _debug_orig = NULL;
  int old_debug_idx = Compile::debug_idx();
  int new_debug_idx = old_debug_idx+1;
  if (new_debug_idx > 0) {
    // Arrange that the lowest five decimal digits of _debug_idx
    // will repeat those of _idx. In case this is somehow pathological,
    // we continue to assign negative numbers (!) consecutively.
    const int mod = 100000;
    int bump = (int)(_idx - new_debug_idx) % mod;
    if (bump < 0) bump += mod;
    assert(bump >= 0 && bump < mod, "");
    new_debug_idx += bump;
  }
  Compile::set_debug_idx(new_debug_idx);
  set_debug_idx( new_debug_idx );
  assert(Compile::current()->unique() < (INT_MAX - 1), "Node limit exceeded INT_MAX");
  assert(Compile::current()->live_nodes() < Compile::current()->max_node_limit(), "Live Node limit exceeded limit");
  if (BreakAtNode != 0 && (_debug_idx == BreakAtNode || (int)_idx == BreakAtNode)) {
    tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d", _idx, _debug_idx);
    BREAKPOINT;
  }
#if OPTO_DU_ITERATOR_ASSERT
  _last_del = NULL;
  _del_tick = 0;
#endif
  _hash_lock = 0;
}


// #ifdef ASSERT ...

#if OPTO_DU_ITERATOR_ASSERT
// Snapshot the node's out-edge state (_outcnt/_del_tick) so later verify()
// calls can detect unexpected insertions or deletions during iteration.
void DUIterator_Common::sample(const Node* node) {
  _vdui     = VerifyDUIterators;
  _node     = node;
  _outcnt   = node->_outcnt;
  _del_tick = node->_del_tick;
  _last     = NULL;
}

// Check that the iterator still refers to the same node and that no
// deletions have happened since the last sample/resync.
void DUIterator_Common::verify(const Node* node, bool at_end_ok) {
  assert(_node     == node, "consistent iterator source");
  assert(_del_tick == node->_del_tick, "no unexpected deletions allowed");
}

void DUIterator_Common::verify_resync() {
  // Ensure that the loop body has just deleted the last guy produced.
  const Node* node = _node;
  // Ensure that at least one copy of the last-seen edge was deleted.
  // Note:  It is OK to delete multiple copies of the last-seen edge.
  // Unfortunately, we have no way to verify that all the deletions delete
  // that same edge.  On this point we must use the Honor System.
  assert(node->_del_tick >= _del_tick+1, "must have deleted an edge");
  assert(node->_last_del == _last, "must have deleted the edge just produced");
  // We liked this deletion, so accept the resulting outcnt and tick.
  _outcnt   = node->_outcnt;
  _del_tick = node->_del_tick;
}

// Re-initialize this iterator's assertion state from 'that', preserving
// _last so a pending resync check still works.
void DUIterator_Common::reset(const DUIterator_Common& that) {
  if (this == &that)  return;  // ignore assignment to self
  if (!_vdui) {
    // We need to initialize everything, overwriting garbage values.
    _last = that._last;
    _vdui = that._vdui;
  }
  // Note:  It is legal (though odd) for an iterator over some node x
  // to be reassigned to iterate over another node y.  Some doubly-nested
  // progress loops depend on being able to do this.
  const Node* node = that._node;
  // Re-initialize everything, except _last.
  _node     = node;
  _outcnt   = node->_outcnt;
  _del_tick = node->_del_tick;
}

void DUIterator::sample(const Node* node) {
  DUIterator_Common::sample(node);      // Initialize the assertion data.
  _refresh_tick = 0;                    // No refreshes have happened, as yet.
}

void DUIterator::verify(const Node* node, bool at_end_ok) {
  DUIterator_Common::verify(node, at_end_ok);
  assert(_idx      <  node->_outcnt + (uint)at_end_ok, "idx in range");
}

void DUIterator::verify_increment() {
  if (_refresh_tick & 1) {
    // We have refreshed the index during this loop.
    // Fix up _idx to meet asserts.
    if (_idx > _outcnt)  _idx = _outcnt;
  }
  verify(_node, true);
}

void DUIterator::verify_resync() {
  // Note:  We do not assert on _outcnt, because insertions are OK here.
  DUIterator_Common::verify_resync();
  // Make sure we are still in sync, possibly with no more out-edges:
  verify(_node, true);
}

// Copy assertion state from 'that', which must be a fresh Node::outs()
// result; clears the "was refreshed" flag and bounds total refreshes.
void DUIterator::reset(const DUIterator& that) {
  if (this == &that)  return;  // self assignment is always a no-op
  assert(that._refresh_tick == 0, "assign only the result of Node::outs()");
  assert(that._idx          == 0, "assign only the result of Node::outs()");
  assert(_idx               == that._idx, "already assigned _idx");
  if (!_vdui) {
    // We need to initialize everything, overwriting garbage values.
    sample(that._node);
  } else {
    DUIterator_Common::reset(that);
    if (_refresh_tick & 1) {
      _refresh_tick++;                  // Clear the "was refreshed" flag.
    }
    assert(_refresh_tick < 2*100000, "DU iteration must converge quickly");
  }
}

void DUIterator::refresh() {
  DUIterator_Common::sample(_node);     // Re-fetch assertion data.
  _refresh_tick |= 1;                   // Set the "was refreshed" flag.
}

void DUIterator::verify_finish() {
  // If the loop has killed the node, do not require it to re-run.
  if (_node->_outcnt == 0)  _refresh_tick &= ~1;
  // If this assert triggers, it means that a loop used refresh_out_pos
  // to re-synch an iteration index, but the loop did not correctly
  // re-run itself, using a "while (progress)" construct.
  // This iterator enforces the rule that you must keep trying the loop
  // until it "runs clean" without any need for refreshing.
  assert(!(_refresh_tick & 1), "the loop must run once with no refreshing");
}


void DUIterator_Fast::verify(const Node* node, bool at_end_ok) {
  DUIterator_Common::verify(node, at_end_ok);
  Node** out    = node->_out;
  uint   cnt    = node->_outcnt;
  assert(cnt == _outcnt, "no insertions allowed");
  assert(_outp >= out && _outp <= out + cnt - !at_end_ok, "outp in range");
  // This last check is carefully designed to work for NO_OUT_ARRAY.
}

void DUIterator_Fast::verify_limit() {
  const Node* node = _node;
  verify(node, true);
  assert(_outp == node->_out + node->_outcnt, "limit still correct");
}

void DUIterator_Fast::verify_resync() {
  const Node* node = _node;
  if (_outp == node->_out + _outcnt) {
    // Note that the limit imax, not the pointer i, gets updated with the
    // exact count of deletions.  (For the pointer it's always "--i".)
    assert(node->_outcnt+node->_del_tick == _outcnt+_del_tick, "no insertions allowed with deletion(s)");
    // This is a limit pointer, with a name like "imax".
    // Fudge the _last field so that the common assert will be happy.
    _last = (Node*) node->_last_del;
    DUIterator_Common::verify_resync();
  } else {
    assert(node->_outcnt < _outcnt, "no insertions allowed with deletion(s)");
    // A normal internal pointer.
    DUIterator_Common::verify_resync();
    // Make sure we are still in sync, possibly with no more out-edges:
    verify(node, true);
  }
}

void DUIterator_Fast::verify_relimit(uint n) {
  const Node* node = _node;
  assert((int)n > 0, "use imax -= n only with a positive count");
  // This must be a limit pointer, with a name like "imax".
  assert(_outp == node->_out + node->_outcnt, "apply -= only to a limit (imax)");
  // The reported number of deletions must match what the node saw.
  assert(node->_del_tick == _del_tick + n, "must have deleted n edges");
  // Fudge the _last field so that the common assert will be happy.
  _last = (Node*) node->_last_del;
  DUIterator_Common::verify_resync();
}

void DUIterator_Fast::reset(const DUIterator_Fast& that) {
  assert(_outp              == that._outp, "already assigned _outp");
  DUIterator_Common::reset(that);
}

void DUIterator_Last::verify(const Node* node, bool at_end_ok) {
  // at_end_ok means the _outp is allowed to underflow by 1
  _outp += at_end_ok;
  DUIterator_Fast::verify(node, at_end_ok);  // check _del_tick, etc.
  _outp -= at_end_ok;
  assert(_outp == (node->_out + node->_outcnt) - 1, "pointer must point to end of nodes");
}

void DUIterator_Last::verify_limit() {
  // Do not require the limit address to be resynched.
  //verify(node, true);
  assert(_outp == _node->_out, "limit still correct");
}

// Account for a backward step over num_edges just-deleted out-edges,
// updating the shadow counters before re-verifying.
void DUIterator_Last::verify_step(uint num_edges) {
  assert((int)num_edges > 0, "need non-zero edge count for loop progress");
  _outcnt   -= num_edges;
  _del_tick += num_edges;
  // Make sure we are still in sync, possibly with no more out-edges:
  const Node* node = _node;
  verify(node, true);
  assert(node->_last_del == _last, "must have deleted the edge just produced");
}

#endif //OPTO_DU_ITERATOR_ASSERT


#endif //ASSERT


// This constant used to initialize _out may be any non-null value.
// The value NULL is reserved for the top node only.
#define NO_OUT_ARRAY ((Node**)-1)

// This funny expression handshakes with Node::operator new
// to pull Compile::current out of the new node's _out field,
// and then calls a subroutine which manages most field
// initializations.  The only one which is tricky is the
// _idx field, which is const, and so must be initialized
// by a return value, not an assignment.
//
// (Aren't you thankful that Java finals don't require so many tricks?)
// IDX_INIT is used in every Node constructor's initializer list: operator
// new (elsewhere in this file's headers) stashes the current Compile* in the
// uninitialized _out slot, and Init() retrieves it to produce the const _idx.
#define IDX_INIT(req) this->Init((req), (Compile*) this->_out)
#ifdef _MSC_VER // the IDX_INIT hack falls foul of warning C4355
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif

// Out-of-line code from node constructors.
// Executed only when extra debug info. is being passed around.
static void init_node_notes(Compile* C, int idx, Node_Notes* nn) {
  C->set_node_notes_at(idx, nn);
}

// Shared initialization code.
// Allocates the _in array, captures default node notes, and zeroes the
// remaining counters; returns the new unique index for the const _idx field.
inline int Node::Init(int req, Compile* C) {
  assert(Compile::current() == C, "must use operator new(Compile*)");
  int idx = C->next_unique();

  // Allocate memory for the necessary number of edges.
  if (req > 0) {
    // Allocate space for _in array to have double alignment.
    _in = (Node **) ((char *) (C->node_arena()->Amalloc_D(req * sizeof(void*))));
#ifdef ASSERT
    // Magic cookie: the constructors assert on _in[req-1] to verify that the
    // same 'req' was passed to operator new and to the constructor.
    _in[req-1] = this; // magic cookie for assertion check
#endif
  }
  // If there are default notes floating around, capture them:
  Node_Notes* nn = C->default_node_notes();
  if (nn != NULL)  init_node_notes(C, idx, nn);

  // Note:  At this point, C is dead,
  // and we begin to initialize the new Node.

  _cnt = _max = req;
  _outcnt = _outmax = 0;
  _class_id = Class_Node;
  _flags = 0;
  _out = NO_OUT_ARRAY;
  return idx;
}

//------------------------------Node-------------------------------------------
// Create a Node, with a given number of required edges.
//------------------------------Node-------------------------------------------
// Create a Node, with a given number of required edges, all initially NULL.
Node::Node(uint req)
  : _idx(IDX_INIT(req))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  assert( req < Compile::current()->max_node_limit() - NodeLimitFudgeFactor, "Input limit exceeded" );
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  if (req == 0) {
    // With req == 0, Init() did not allocate _in; operator new leaves the
    // cookie as 'this' only when no edge storage was requested.
    assert( _in == (Node**)this, "Must not pass arg count to 'new'" );
    _in = NULL;
  } else {
    assert( _in[req-1] == this, "Must pass arg count to 'new'" );
    Node** to = _in;
    for(uint i = 0; i < req; i++) {
      to[i] = NULL;
    }
  }
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0)
  : _idx(IDX_INIT(1))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[0] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1)
  : _idx(IDX_INIT(2))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[1] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2)
  : _idx(IDX_INIT(3))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[2] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3)
  : _idx(IDX_INIT(4))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[3] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3, Node *n4)
  : _idx(IDX_INIT(5))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[4] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  assert( is_not_dead(n4), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
  _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
           Node *n4, Node *n5)
  : _idx(IDX_INIT(6))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[5] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  assert( is_not_dead(n4), "can not use dead node");
  assert( is_not_dead(n5), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
  _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
  _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
           Node *n4, Node *n5, Node *n6)
  : _idx(IDX_INIT(7))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[6] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  assert( is_not_dead(n4), "can not use dead node");
  assert( is_not_dead(n5), "can not use dead node");
  assert( is_not_dead(n6), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
  _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
  _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
  _in[6] = n6; if (n6 != NULL) n6->add_out((Node *)this);
}


//------------------------------clone------------------------------------------
// Clone a Node.  The clone shares the original's inputs (and registers with
// each input's out-list) but gets a fresh, empty out array and a new _idx.
Node *Node::clone() const {
  Compile* C = Compile::current();
  uint s = size_of();           // Size of inherited Node
  // Raw bitwise copy of the node object, with the input array placed
  // immediately after it in the same arena allocation.
  Node *n = (Node*)C->node_arena()->Amalloc_D(size_of() + _max*sizeof(Node*));
  Copy::conjoint_words_to_lower((HeapWord*)this, (HeapWord*)n, s);
  // Set the new input pointer array
  n->_in = (Node**)(((char*)n)+s);
  // Cannot share the old output pointer array, so kill it
  n->_out = NO_OUT_ARRAY;
  // And reset the counters to 0
  n->_outcnt = 0;
  n->_outmax = 0;
  // Unlock this guy, since he is not in any hash table.
  debug_only(n->_hash_lock = 0);
  // Walk the old node's input list to duplicate its edges
  uint i;
  for( i = 0; i < len(); i++ ) {
    Node *x = in(i);
    n->_in[i] = x;
    if (x != NULL) x->add_out(n);
  }
  // Keep the compile-wide side lists (macro nodes, expensive nodes,
  // Shenandoah barriers, range-check CastIIs) in sync with the clone.
  if (is_macro())
    C->add_macro_node(n);
  if (is_expensive())
    C->add_expensive_node(n);

  if (Opcode() == Op_ShenandoahWriteBarrier) {
    C->add_shenandoah_barrier(n->as_ShenandoahBarrier());
  }
  // If the cloned node is a range check dependent CastII, add it to the list.
  CastIINode* cast = n->isa_CastII();
  if (cast != NULL && cast->has_range_check()) {
    C->add_range_check_cast(cast);
  }

  n->set_idx(C->next_unique()); // Get new unique index as well
  debug_only( n->verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Do not patch over the debug_idx of a clone, because it makes it
  // impossible to break on the clone's moment of creation.
  //debug_only( n->set_debug_idx( debug_idx() ) );

  C->copy_node_notes_to(n, (Node*) this);

  // MachNode clone
  uint nopnds;
  if (this->is_Mach() && (nopnds = this->as_Mach()->num_opnds()) > 0) {
    MachNode *mach  = n->as_Mach();
    MachNode *mthis = this->as_Mach();
    // Get address of _opnd_array.
    // It should be the same offset since it is the clone of this node.
    MachOper **from = mthis->_opnds;
    MachOper **to = (MachOper **)((size_t)(&mach->_opnds) +
                    pointer_delta((const void*)from,
                                  (const void*)(&mthis->_opnds), 1));
    mach->_opnds = to;
    for ( uint i = 0; i < nopnds; ++i ) {
      to[i] = from[i]->clone(C);
    }
  }
  // cloning CallNode may need to clone JVMState
  if (n->is_Call()) {
    n->as_Call()->clone_jvms(C);
  }
  if (n->is_SafePoint()) {
    n->as_SafePoint()->clone_replaced_nodes();
  }
  return n;                     // Return the clone
}

//---------------------------setup_is_top--------------------------------------
// Call this when changing the top node, to reassert the invariants
// required by Node::is_top.  See Compile::set_cached_top_node.
void Node::setup_is_top() {
  if (this == (Node*)Compile::current()->top()) {
    // This node has just become top.  Kill its out array.
    _outcnt = _outmax = 0;
    _out = NULL;                           // marker value for top
    assert(is_top(), "must be top");
  } else {
    if (_out == NULL)  _out = NO_OUT_ARRAY;
    assert(!is_top(), "must not be top");
  }
}


//------------------------------~Node------------------------------------------
// Fancy destructor; eagerly attempt to reclaim Node numberings and storage
extern int reclaim_idx ;
extern int reclaim_in  ;
extern int reclaim_node;
void Node::destruct() {
  // Eagerly reclaim unique Node numberings
  Compile* compile = Compile::current();
  if ((uint)_idx+1 == compile->unique()) {
    compile->set_unique(compile->unique()-1);
#ifdef ASSERT
    reclaim_idx++;
#endif
  }
  // Clear debug info:
  Node_Notes* nn = compile->node_notes_at(_idx);
  if (nn != NULL)  nn->clear();
  // Walk the input array, freeing the corresponding output edges
  _cnt = _max;  // forget req/prec distinction
  uint i;
  for( i = 0; i < _max; i++ ) {
    set_req(i, NULL);
    //assert(def->out(def->outcnt()-1) == (Node *)this,"bad def-use hacking in reclaim");
  }
  assert(outcnt() == 0, "deleting a node must not leave a dangling use");
  // See if the input array was allocated just prior to the object
  int edge_size = _max*sizeof(void*);
  int out_edge_size = _outmax*sizeof(void*);
  char *edge_end = ((char*)_in) + edge_size;
  char *out_array = (char*)(_out == NO_OUT_ARRAY? NULL: _out);
  char *out_edge_end = out_array + out_edge_size;
  int node_size = size_of();

  // Free the output edge array
  if (out_edge_size > 0) {
#ifdef ASSERT
    if( out_edge_end == compile->node_arena()->hwm() )
      reclaim_in  += out_edge_size;  // count reclaimed out edges with in edges
#endif
    compile->node_arena()->Afree(out_array, out_edge_size);
  }

  // Free the input edge array and the node itself
  // NOTE(review): in ASSERT builds this branch only updates the reclaim
  // counters and does not Afree; the storage is kept so it can be poisoned
  // with badAddress below.
  if( edge_end == (char*)this ) {
#ifdef ASSERT
    if( edge_end+node_size == compile->node_arena()->hwm() ) {
      reclaim_in  += edge_size;
      reclaim_node+= node_size;
    }
#else
    // It was; free the input array and object all in one hit
    compile->node_arena()->Afree(_in,edge_size+node_size);
#endif
  } else {

    // Free just the input array
#ifdef ASSERT
    if( edge_end == compile->node_arena()->hwm() )
      reclaim_in  += edge_size;
#endif
    compile->node_arena()->Afree(_in,edge_size);

    // Free just the object
#ifdef ASSERT
    if( ((char*)this) + node_size == compile->node_arena()->hwm() )
      reclaim_node+= node_size;
#else
    compile->node_arena()->Afree(this,node_size);
#endif
  }
  // Remove this node from any compile-wide side lists it was registered on.
  if (is_macro()) {
    compile->remove_macro_node(this);
  }
  if (is_expensive()) {
    compile->remove_expensive_node(this);
  }
  if (is_ShenandoahBarrier()) {
    compile->remove_shenandoah_barrier(this->as_ShenandoahBarrier());
  }
  CastIINode* cast = isa_CastII();
  if (cast != NULL && cast->has_range_check()) {
    compile->remove_range_check_cast(cast);
  }

  if (is_SafePoint()) {
    as_SafePoint()->delete_replaced_nodes();
  }
#ifdef ASSERT
  // We will not actually delete the storage, but we'll make the node unusable.
  *(address*)this = badAddress;  // smash the C++ vtbl, probably
  _in = _out = (Node**) badAddress;
  _max = _cnt = _outmax = _outcnt = 0;
#endif
}

//------------------------------grow-------------------------------------------
// Grow the input array, making space for more edges
void Node::grow( uint len ) {
  Arena* arena = Compile::current()->node_arena();
  uint new_max = _max;
  if( new_max == 0 ) {
    // First allocation: start with room for 4 edges, all NULL.
    _max = 4;
    _in = (Node**)arena->Amalloc(4*sizeof(Node*));
    Node** to = _in;
    to[0] = NULL;
    to[1] = NULL;
    to[2] = NULL;
    to[3] = NULL;
    return;
  }
  while( new_max <= len ) new_max <<= 1; // Find next power-of-2
  // Trimming to limit allows a uint8 to handle up to 255 edges.
  // Previously I was using only powers-of-2 which peaked at 128 edges.
  //if( new_max >= limit ) new_max = limit-1;
  _in = (Node**)arena->Arealloc(_in, _max*sizeof(Node*), new_max*sizeof(Node*));
  Copy::zero_to_bytes(&_in[_max], (new_max-_max)*sizeof(Node*)); // NULL all new space
  _max = new_max;               // Record new max length
  // This assertion makes sure that Node::_max is wide enough to
  // represent the numerical value of new_max.
  assert(_max == new_max && _max > len, "int width of _max is too small");
}

//-----------------------------out_grow----------------------------------------
// Grow the output array, making space for more edges.
void Node::out_grow( uint len ) {
  assert(!is_top(), "cannot grow a top node's out array");
  Arena* arena = Compile::current()->node_arena();
  uint new_max = _outmax;
  if( new_max == 0 ) {
    _outmax = 4;
    _out = (Node **)arena->Amalloc(4*sizeof(Node*));
    return;
  }
  while( new_max <= len ) new_max <<= 1; // Find next power-of-2
  // Trimming to limit allows a uint8 to handle up to 255 edges.
  // Previously I was using only powers-of-2 which peaked at 128 edges.
  //if( new_max >= limit ) new_max = limit-1;
  assert(_out != NULL && _out != NO_OUT_ARRAY, "out must have sensible value");
  _out = (Node**)arena->Arealloc(_out,_outmax*sizeof(Node*),new_max*sizeof(Node*));
  //Copy::zero_to_bytes(&_out[_outmax], (new_max-_outmax)*sizeof(Node*)); // NULL all new space
  _outmax = new_max;               // Record new max length
  // This assertion makes sure that Node::_max is wide enough to
  // represent the numerical value of new_max.
  assert(_outmax == new_max && _outmax > len, "int width of _outmax is too small");
}

#ifdef ASSERT
//------------------------------is_dead----------------------------------------
// A node is considered dead when all of its inputs are NULL (with exceptions
// for top, Mach nodes, and pinch points that legitimately look dead).
bool Node::is_dead() const {
  // Mach and pinch point nodes may look like dead.
  if( is_top() || is_Mach() || (Opcode() == Op_Node && _outcnt > 0) )
    return false;
  for( uint i = 0; i < _max; i++ )
    if( _in[i] != NULL )
      return false;
  dump();
  return true;
}
#endif


//------------------------------is_unreachable---------------------------------
bool Node::is_unreachable(PhaseIterGVN &igvn) const {
  assert(!is_Mach(), "doesn't work with MachNodes");
  return outcnt() == 0 || igvn.type(this) == Type::TOP || in(0)->is_top();
}

//------------------------------add_req----------------------------------------
// Add a new required input at the end
void Node::add_req( Node *n ) {
  assert( is_not_dead(n), "can not use dead node");

  // Look to see if I can move precedence down one without reallocating
  if( (_cnt >= _max) || (in(_max-1) != NULL) )
    grow( _max+1 );

  // Find a precedence edge to move
  if( in(_cnt) != NULL ) {       // Next precedence edge is busy?
    uint i;
    for( i=_cnt; i<_max; i++ )
      if( in(i) == NULL )        // Find the NULL at end of prec edge list
        break;                   // There must be one, since we grew the array
    _in[i] = in(_cnt);           // Move prec over, making space for req edge
  }
  _in[_cnt++] = n;               // Stuff over old prec edge
  if (n != NULL) n->add_out((Node *)this);
}

//---------------------------add_req_batch-------------------------------------
// Add 'm' copies of the same required input at the end.
void Node::add_req_batch( Node *n, uint m ) {
  assert( is_not_dead(n), "can not use dead node");
  // check various edge cases
  if ((int)m <= 1) {
    assert((int)m >= 0, "oob");
    if (m != 0)  add_req(n);
    return;
  }

  // Look to see if I can move precedence down one without reallocating
  if( (_cnt+m) > _max || _in[_max-m] )
    grow( _max+m );

  // Find a precedence edge to move
  if( _in[_cnt] != NULL ) {      // Next precedence edge is busy?
    uint i;
    for( i=_cnt; i<_max; i++ )
      if( _in[i] == NULL )       // Find the NULL at end of prec edge list
        break;                   // There must be one, since we grew the array
    // Slide all the precs over by m positions (assume #prec << m).
    Copy::conjoint_words_to_higher((HeapWord*)&_in[_cnt], (HeapWord*)&_in[_cnt+m], ((i-_cnt)*sizeof(Node*)));
  }

  // Stuff over the old prec edges
  for(uint i=0; i<m; i++ ) {
    _in[_cnt++] = n;
  }

  // Insert multiple out edges on the node.
  if (n != NULL && !n->is_top()) {
    for(uint i=0; i<m; i++ ) {
      n->add_out((Node *)this);
    }
  }
}

//------------------------------del_req----------------------------------------
// Delete the required edge and compact the edge array
void Node::del_req( uint idx ) {
  assert( idx < _cnt, "oob");
  assert( !VerifyHashTableKeys || _hash_lock == 0,
          "remove node from hash table before modifying it");
  // First remove corresponding def-use edge
  Node *n = in(idx);
  if (n != NULL) n->del_out((Node *)this);
  _in[idx] = in(--_cnt); // Compact the array
  _in[_cnt] = NULL;      // NULL out emptied slot
}

//------------------------------del_req_ordered--------------------------------
// Delete the required edge and compact the edge array with preserved order
void Node::del_req_ordered( uint idx ) {
  assert( idx < _cnt, "oob");
  assert( !VerifyHashTableKeys || _hash_lock == 0,
          "remove node from hash table before modifying it");
  // First remove corresponding def-use edge
  Node *n = in(idx);
  if (n != NULL) n->del_out((Node *)this);
  if (idx < _cnt - 1) { // Not last edge ?
    Copy::conjoint_words_to_lower((HeapWord*)&_in[idx+1], (HeapWord*)&_in[idx], ((_cnt-idx-1)*sizeof(Node*)));
  }
  _in[--_cnt] = NULL;   // NULL out emptied slot
}

//------------------------------ins_req----------------------------------------
// Insert a new required input at position idx, sliding later inputs up.
void Node::ins_req( uint idx, Node *n ) {
  assert( is_not_dead(n), "can not use dead node");
  add_req(NULL);                // Make space
  assert( idx < _max, "Must have allocated enough space");
  // Slide over
  if(_cnt-idx-1 > 0) {
    Copy::conjoint_words_to_higher((HeapWord*)&_in[idx], (HeapWord*)&_in[idx+1], ((_cnt-idx-1)*sizeof(Node*)));
  }
  _in[idx] = n;                            // Stuff over old required edge
  if (n != NULL) n->add_out((Node *)this); // Add reciprocal def-use edge
}

//-----------------------------find_edge---------------------------------------
// Return the index of the first input equal to n, or -1 if none.
int Node::find_edge(Node* n) {
  for (uint i = 0; i < len(); i++) {
    if (_in[i] == n)  return i;
  }
  return -1;
}

//----------------------------replace_edge-------------------------------------
// Replace every occurrence of 'old' among the inputs (required and
// precedence) with 'neww'; returns the number of replacements.
int Node::replace_edge(Node* old, Node* neww) {
  if (old == neww)  return 0;  // nothing to do
  uint nrep = 0;
  for (uint i = 0; i < len(); i++) {
    if (in(i) == old) {
      if (i < req())
        set_req(i, neww);
      else
        set_prec(i, neww);
      nrep++;
    }
  }
  return nrep;
}

/**
 * Replace input edges in the range pointing to 'old' node.
 */
int Node::replace_edges_in_range(Node* old, Node* neww, int start, int end) {
  if (old == neww)  return 0;  // nothing to do
  uint nrep = 0;
  for (int i = start; i < end; i++) {
    if (in(i) == old) {
      set_req(i, neww);
      nrep++;
    }
  }
  return nrep;
}

//-------------------------disconnect_inputs-----------------------------------
// NULL out all inputs to eliminate incoming Def-Use edges.
// Return the number of edges between 'n' and 'this'.
// Side effect: every input (required and precedence) of 'this' is cleared;
// if 'n' was never an input, 'this' is recorded as dead with the compiler.
int Node::disconnect_inputs(Node *n, Compile* C) {
  int edges_to_n = 0;

  uint cnt = req();
  for( uint i = 0; i < cnt; ++i ) {
    if( in(i) == 0 )  continue;
    if( in(i) == n )  ++edges_to_n;
    set_req(i, NULL);
  }
  // Remove precedence edges if any exist
  // Note: Safepoints may have precedence edges, even during parsing
  if( (req() != len()) && (in(req()) != NULL) ) {
    uint max = len();
    for( uint i = 0; i < max; ++i ) {
      if( in(i) == 0 )  continue;
      if( in(i) == n )  ++edges_to_n;
      set_prec(i, NULL);
    }
  }

  // Node::destruct requires all out edges be deleted first
  // debug_only(destruct();)   // no reuse benefit expected
  if (edges_to_n == 0) {
    C->record_dead_node(_idx);
  }
  return edges_to_n;
}

//-----------------------------uncast---------------------------------------
// %%% Temporary, until we sort out CheckCastPP vs. CastPP.
// Strip away casting.  (It is depth-limited.)
// Walks through chains of ConstraintCast / CheckCastPP nodes and returns
// the first non-cast ancestor (or 'this' if it is not a cast).
Node* Node::uncast() const {
  // Should be inline:
  //return is_ConstraintCast() ? uncast_helper(this) : (Node*) this;
  if (is_ConstraintCast() || is_CheckCastPP())
    return uncast_helper(this);
  else
    return (Node*) this;
}

//---------------------------uncast_helper-------------------------------------
// Iterative cast-stripping loop for uncast().  In debug builds the walk is
// bounded by K steps to catch cycles in the cast chain.
Node* Node::uncast_helper(const Node* p) {
#ifdef ASSERT
  uint depth_count = 0;
  const Node* orig_p = p;
#endif

  while (true) {
#ifdef ASSERT
    if (depth_count >= K) {
      orig_p->dump(4);
      if (p != orig_p)
        p->dump(1);
    }
    assert(depth_count++ < K, "infinite loop in Node::uncast_helper");
#endif
    // Casts being stripped here always have exactly two inputs
    // (control + value), hence the req() == 2 filter.
    if (p == NULL || p->req() != 2) {
      break;
    } else if (p->is_ConstraintCast()) {
      p = p->in(1);
    } else if (p->is_CheckCastPP()) {
      p = p->in(1);
    } else {
      break;
    }
  }
  return (Node*) p;
}

// Find out of current node that matches opcode.
// Returns the first use (out edge) whose Opcode() equals 'opcode', or NULL.
Node* Node::find_out_with(int opcode) {
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    Node* use = fast_out(i);
    if (use->Opcode() == opcode) {
      return use;
    }
  }
  return NULL;
}

// Return true if the current node has an out that matches opcode.
bool Node::has_out_with(int opcode) {
  return (find_out_with(opcode) != NULL);
}

//------------------------------add_prec---------------------------------------
// Add a new precedence input.  Precedence inputs are unordered, with
// duplicates removed and NULLs packed down at the end.
void Node::add_prec( Node *n ) {
  assert( is_not_dead(n), "can not use dead node");

  // Check for NULL at end: grow if the array is full or the last slot is
  // occupied (prec edges live in [_cnt, _max) and are NULL-terminated).
  if( _cnt >= _max || in(_max-1) )
    grow( _max+1 );

  // Find a precedence edge to move: first NULL slot at or after _cnt.
  uint i = _cnt;
  while( in(i) != NULL ) i++;
  _in[i] = n;                                // Stuff prec edge over NULL
  if ( n != NULL) n->add_out((Node *)this);  // Add mirror edge
}

//------------------------------rm_prec----------------------------------------
// Remove a precedence input.  Precedence inputs are unordered, with
// duplicates removed and NULLs packed down at the end.
void Node::rm_prec( uint j ) {

  // Find end of precedence list to pack NULLs
  uint i;
  for( i=j; i<_max; i++ )
    if( !_in[i] )               // Find the NULL at end of prec edge list
      break;
  // Drop the reciprocal out edge before overwriting the slot.
  if (_in[j] != NULL) _in[j]->del_out((Node *)this);
  _in[j] = _in[--i];            // Move last element over removed guy
  _in[i] = NULL;                // NULL out last element
}

//------------------------------size_of----------------------------------------
// Allocation size of this node; subclasses override to report their own size.
uint Node::size_of() const { return sizeof(*this); }

//------------------------------ideal_reg--------------------------------------
// Default: this node produces no register value.
uint Node::ideal_reg() const { return 0; }

//------------------------------jvms-------------------------------------------
// Default: no JVM state attached (SafePoint-like nodes override).
JVMState* Node::jvms() const { return NULL; }

#ifdef ASSERT
//------------------------------jvms-------------------------------------------
// True if 'using_jvms' appears anywhere on this node's caller chain.
bool Node::verify_jvms(const JVMState* using_jvms) const {
  for (JVMState* jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
    if (jvms == using_jvms)  return true;
  }
  return false;
}

//------------------------------init_NodeProperty------------------------------
// Sanity-check that class ids and flag bits still fit in their jushort fields.
void Node::init_NodeProperty() {
  assert(_max_classes <= max_jushort, "too many NodeProperty classes");
  assert(_max_flags <= max_jushort, "too many NodeProperty flags");
}
#endif

//------------------------------format-----------------------------------------
// Print as assembly
void Node::format( PhaseRegAlloc *, outputStream *st ) const {}
//------------------------------emit-------------------------------------------
// Emit bytes starting at parameter 'ptr'.
void Node::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {}
//------------------------------size-------------------------------------------
// Size of instruction in bytes
uint Node::size(PhaseRegAlloc *ra_) const { return 0; }

//------------------------------CFG Construction-------------------------------
// Nodes that end basic blocks, e.g. IfTrue/IfFalse, JumpProjNode, Root,
// Goto and Return.
const Node *Node::is_block_proj() const { return 0; }

// Minimum guaranteed type
const Type *Node::bottom_type() const { return Type::BOTTOM; }


//------------------------------raise_bottom_type------------------------------
// Get the worst-case Type output for this Node.
// Only Type and Load nodes carry a mutable _type; others are unaffected.
void Node::raise_bottom_type(const Type* new_type) {
  if (is_Type()) {
    TypeNode *n = this->as_Type();
    if (VerifyAliases) {
      assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
    }
    n->set_type(new_type);
  } else if (is_Load()) {
    LoadNode *n = this->as_Load();
    if (VerifyAliases) {
      assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
    }
    n->set_type(new_type);
  }
}

//------------------------------Identity---------------------------------------
// Return a node that the given node is equivalent to.
Node *Node::Identity( PhaseTransform * ) {
  return this;                  // Default to no identities
}

//------------------------------Value------------------------------------------
// Compute a new Type for a node using the Type of the inputs.
const Type *Node::Value( PhaseTransform * ) const {
  return bottom_type();         // Default to worst-case Type
}

//------------------------------Ideal------------------------------------------
//
// 'Idealize' the graph rooted at this Node.
//
// In order to be efficient and flexible there are some subtle invariants
// these Ideal calls need to hold.  Running with '+VerifyIterativeGVN' checks
// these invariants, although its too slow to have on by default.  If you are
// hacking an Ideal call, be sure to test with +VerifyIterativeGVN!
//
// The Ideal call almost arbitrarily reshape the graph rooted at the 'this'
// pointer.  If ANY change is made, it must return the root of the reshaped
// graph - even if the root is the same Node.  Example: swapping the inputs
// to an AddINode gives the same answer and same root, but you still have to
// return the 'this' pointer instead of NULL.
//
// You cannot return an OLD Node, except for the 'this' pointer.  Use the
// Identity call to return an old Node; basically if Identity can find
// another Node have the Ideal call make no change and return NULL.
// Example: AddINode::Ideal must check for add of zero; in this case it
// returns NULL instead of doing any graph reshaping.
//
// You cannot modify any old Nodes except for the 'this' pointer.  Due to
// sharing there may be other users of the old Nodes relying on their current
// semantics.  Modifying them will break the other users.
// Example: when reshape "(X+3)+4" into "X+7" you must leave the Node for
// "X+3" unchanged in case it is shared.
//
// If you modify the 'this' pointer's inputs, you should use
// 'set_req'.  If you are making a new Node (either as the new root or
// some new internal piece) you may use 'init_req' to set the initial
// value.  You can make a new Node with either 'new' or 'clone'.  In
// either case, def-use info is correctly maintained.
//
// Example: reshape "(X+3)+4" into "X+7":
//    set_req(1, in(1)->in(1));
//    set_req(2, phase->intcon(7));
//    return this;
// Example: reshape "X*4" into "X<<2"
//    return new (C) LShiftINode(in(1), phase->intcon(2));
//
// You must call 'phase->transform(X)' on any new Nodes X you make, except
// for the returned root node.  Example: reshape "X*31" with "(X<<5)-X".
//    Node *shift=phase->transform(new(C)LShiftINode(in(1),phase->intcon(5)));
//    return new (C) AddINode(shift, in(1));
//
// When making a Node for a constant use 'phase->makecon' or 'phase->intcon'.
// These forms are faster than 'phase->transform(new (C) ConNode())' and Do
// The Right Thing with def-use info.
//
// You cannot bury the 'this' Node inside of a graph reshape.  If the reshaped
// graph uses the 'this' Node it must be the root.  If you want a Node with
// the same Opcode as the 'this' pointer use 'clone'.
//
Node *Node::Ideal(PhaseGVN *phase, bool can_reshape) {
  return NULL;                  // Default to being Ideal already
}

// Some nodes have specific Ideal subgraph transformations only if they are
// unique users of specific nodes. Such nodes should be put on IGVN worklist
// for the transformations to happen.
bool Node::has_special_unique_user() const {
  assert(outcnt() == 1, "match only for unique out");
  Node* n = unique_out();
  int op  = Opcode();
  if( this->is_Store() ) {
    // Condition for back-to-back stores folding.
    return n->Opcode() == op && n->in(MemNode::Memory) == this;
  } else if (this->is_Load()) {
    // Condition for removing an unused LoadNode from the MemBarAcquire precedence input
    return n->Opcode() == Op_MemBarAcquire;
  } else if( op == Op_AddL ) {
    // Condition for convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y))
    return n->Opcode() == Op_ConvL2I && n->in(1) == this;
  } else if( op == Op_SubI || op == Op_SubL ) {
    // Condition for subI(x,subI(y,z)) ==> subI(addI(x,z),y)
    return n->Opcode() == op && n->in(2) == this;
  } else if (op == Op_ShenandoahWriteBarrier) {
    return n->Opcode() == Op_ShenandoahWBMemProj;
  }
  return false;
};

//--------------------------find_exact_control---------------------------------
// Skip Proj and CatchProj nodes chains. Check for Null and Top.
// Returns the normalized control ancestor of 'ctrl' (may return NULL or top;
// callers are expected to check).
Node* Node::find_exact_control(Node* ctrl) {
  if (ctrl == NULL && this->is_Region())
    ctrl = this->as_Region()->is_copy();

  if (ctrl != NULL && ctrl->is_CatchProj()) {
    if (ctrl->as_CatchProj()->_con == CatchProjNode::fall_through_index)
      ctrl = ctrl->in(0);
    if (ctrl != NULL && !ctrl->is_top())
      ctrl = ctrl->in(0);
  }

  if (ctrl != NULL && ctrl->is_Proj())
    ctrl = ctrl->in(0);

  return ctrl;
}

//--------------------------dominates------------------------------------------
// Helper function for MemNode::all_controls_dominate().
// Check if 'this' control node dominates or equal to 'sub' control node.
// We already know that if any path back to Root or Start reaches 'this',
// then all paths do, so this is a simple search for one example,
// not an exhaustive search for a counterexample.
bool Node::dominates(Node* sub, Node_List &nlist) {
  assert(this->is_CFG(), "expecting control");
  assert(sub != NULL && sub->is_CFG(), "expecting control");

  // detect dead cycle without regions
  int iterations_without_region_limit = DominatorSearchLimit;

  Node* orig_sub = sub;
  Node* dom      = this;
  bool  met_dom  = false;
  nlist.clear();

  // Walk 'sub' backward up the chain to 'dom', watching for regions.
  // After seeing 'dom', continue up to Root or Start.
  // If we hit a region (backward split point), it may be a loop head.
  // Keep going through one of the region's inputs.  If we reach the
  // same region again, go through a different input.  Eventually we
  // will either exit through the loop head, or give up.
  // (If we get confused, break out and return a conservative 'false'.)
  while (sub != NULL) {
    if (sub->is_top())  break; // Conservative answer for dead code.
    if (sub == dom) {
      if (nlist.size() == 0) {
        // No Region nodes except loops were visited before and the EntryControl
        // path was taken for loops: it did not walk in a cycle.
        return true;
      } else if (met_dom) {
        break;          // already met before: walk in a cycle
      } else {
        // Region nodes were visited. Continue walk up to Start or Root
        // to make sure that it did not walk in a cycle.
        met_dom = true; // first time meet
        iterations_without_region_limit = DominatorSearchLimit; // Reset
      }
    }
    if (sub->is_Start() || sub->is_Root()) {
      // Success if we met 'dom' along a path to Start or Root.
      // We assume there are no alternative paths that avoid 'dom'.
      // (This assumption is up to the caller to ensure!)
      return met_dom;
    }
    Node* up = sub->in(0);
    // Normalize simple pass-through regions and projections:
    up = sub->find_exact_control(up);
    // If sub == up, we found a self-loop.  Try to push past it.
    if (sub == up && sub->is_Loop()) {
      // Take loop entry path on the way up to 'dom'.
      up = sub->in(1); // in(LoopNode::EntryControl);
    } else if (sub == up && sub->is_Region() && sub->req() != 3) {
      // Always take in(1) path on the way up to 'dom' for clone regions
      // (with only one input) or regions which merge > 2 paths
      // (usually used to merge fast/slow paths).
      up = sub->in(1);
    } else if (sub == up && sub->is_Region()) {
      // Try both paths for Regions with 2 input paths (it may be a loop head).
      // It could give conservative 'false' answer without information
      // which region's input is the entry path.
      iterations_without_region_limit = DominatorSearchLimit; // Reset

      bool region_was_visited_before = false;
      // Was this Region node visited before?
      // If so, we have reached it because we accidentally took a
      // loop-back edge from 'sub' back into the body of the loop,
      // and worked our way up again to the loop header 'sub'.
      // So, take the first unexplored path on the way up to 'dom'.
      // Note: nlist stores Region pointers with the low bit of the
      // pointer used as a "visited twice" tag (see push below).
      for (int j = nlist.size() - 1; j >= 0; j--) {
        intptr_t ni = (intptr_t)nlist.at(j);
        Node* visited = (Node*)(ni & ~1);
        bool visited_twice_already = ((ni & 1) != 0);
        if (visited == sub) {
          if (visited_twice_already) {
            // Visited 2 paths, but still stuck in loop body.  Give up.
            return false;
          }
          // The Region node was visited before only once.
          // (We will repush with the low bit set, below.)
          nlist.remove(j);
          // We will find a new edge and re-insert.
          region_was_visited_before = true;
          break;
        }
      }

      // Find an incoming edge which has not been seen yet; walk through it.
      assert(up == sub, "");
      uint skip = region_was_visited_before ? 1 : 0;
      for (uint i = 1; i < sub->req(); i++) {
        Node* in = sub->in(i);
        if (in != NULL && !in->is_top() && in != sub) {
          if (skip == 0) {
            up = in;
            break;
          }
          --skip; // skip this nontrivial input
        }
      }

      // Set 0 bit to indicate that both paths were taken.
      nlist.push((Node*)((intptr_t)sub + (region_was_visited_before ? 1 : 0)));
    }

    if (up == sub) {
      break;    // some kind of tight cycle
    }
    if (up == orig_sub && met_dom) {
      // returned back after visiting 'dom'
      break;    // some kind of cycle
    }
    if (--iterations_without_region_limit < 0) {
      break;    // dead cycle
    }
    sub = up;
  }

  // Did not meet Root or Start node in pred. chain.
  // Conservative answer for dead code.
  return false;
}

//------------------------------remove_dead_region-----------------------------
// This control node is dead.  Follow the subgraph below it making everything
// using it dead as well.  This will happen normally via the usual IterGVN
// worklist but this call is more efficient.  Do not update use-def info
// inside the dead region, just at the borders.
static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) {
  // Con's are a popular node to re-hit in the hash table again.
  if( dead->is_Con() ) return;

  // Can't put ResourceMark here since igvn->_worklist uses the same arena
  // for verify pass with +VerifyOpto and we add/remove elements in it here.
  Node_List  nstack(Thread::current()->resource_area());

  Node *top = igvn->C->top();
  nstack.push(dead);
  bool has_irreducible_loop = igvn->C->has_irreducible_loop();

  while (nstack.size() > 0) {
    dead = nstack.pop();
    if (dead->outcnt() > 0) {
      // Keep dead node on stack until all uses are processed.
      nstack.push(dead);
      // For all Users of the Dead...    ;-)
      for (DUIterator_Last kmin, k = dead->last_outs(kmin); k >= kmin; ) {
        Node* use = dead->last_out(k);
        igvn->hash_delete(use);       // Yank from hash table prior to mod
        if (use->in(0) == dead) {     // Found another dead node
          assert (!use->is_Con(), "Control for Con node should be Root node.");
          use->set_req(0, top);       // Cut dead edge to prevent processing
          nstack.push(use);           // the dead node again.
        } else if (!has_irreducible_loop && // Backedge could be alive in irreducible loop
                   use->is_Loop() && !use->is_Root() &&       // Don't kill Root (RootNode extends LoopNode)
                   use->in(LoopNode::EntryControl) == dead) { // Dead loop if its entry is dead
          use->set_req(LoopNode::EntryControl, top);          // Cut dead edge to prevent processing
          use->set_req(0, top);       // Cut self edge
          nstack.push(use);
        } else {                      // Else found a not-dead user
          // Dead if all inputs are top or null
          bool dead_use = !use->is_Root(); // Keep empty graph alive
          for (uint j = 1; j < use->req(); j++) {
            Node* in = use->in(j);
            if (in == dead) {         // Turn all dead inputs into TOP
              use->set_req(j, top);
            } else if (in != NULL && !in->is_top()) {
              dead_use = false;
            }
          }
          if (dead_use) {
            if (use->is_Region()) {
              use->set_req(0, top);   // Cut self edge
            }
            nstack.push(use);
          } else {
            igvn->_worklist.push(use);
          }
        }
        // Refresh the iterator, since any number of kills might have happened.
        k = dead->last_outs(kmin);
      }
    } else { // (dead->outcnt() == 0)
      // Done with outputs.  Unregister the node from IGVN bookkeeping
      // before severing its inputs.
      igvn->hash_delete(dead);
      igvn->_worklist.remove(dead);
      igvn->set_type(dead, Type::TOP);
      if (dead->is_macro()) {
        igvn->C->remove_macro_node(dead);
      }
      if (dead->is_expensive()) {
        igvn->C->remove_expensive_node(dead);
      }
      if (dead->is_ShenandoahBarrier()) {
        igvn->C->remove_shenandoah_barrier(dead->as_ShenandoahBarrier());
      }
      CastIINode* cast = dead->isa_CastII();
      if (cast != NULL && cast->has_range_check()) {
        igvn->C->remove_range_check_cast(cast);
      }
      igvn->C->record_dead_node(dead->_idx);
      // Kill all inputs to the dead guy
      for (uint i=0; i < dead->req(); i++) {
        Node *n = dead->in(i);      // Get input to dead guy
        if (n != NULL && !n->is_top()) { // Input is valid?
          dead->set_req(i, top);    // Smash input away
          if (n->outcnt() == 0) {   // Input also goes dead?
            if (!n->is_Con())
              nstack.push(n);       // Clear it out as well
          } else if (n->outcnt() == 1 &&
                     n->has_special_unique_user()) {
            igvn->add_users_to_worklist( n );
          } else if (n->outcnt() <= 2 && n->is_Store()) {
            // Push store's uses on worklist to enable folding optimization for
            // store/store and store/load to the same address.
            // The restriction (outcnt() <= 2) is the same as in set_req_X()
            // and remove_globally_dead_node().
            igvn->add_users_to_worklist( n );
          } else if (n->Opcode() == Op_AddP && CallLeafNode::has_only_g1_wb_pre_uses(n)) {
            igvn->add_users_to_worklist(n);
          }
        }
      }
    } // (dead->outcnt() == 0)
  }   // while (nstack.size() > 0) for outputs
  return;
}

//------------------------------remove_dead_region-----------------------------
// If this node's control input is dead (top), aggressively kill the whole
// dead subgraph (when reshaping is allowed).  If control is a copy Region,
// bypass it.  Returns true only when the node was changed and is still alive.
bool Node::remove_dead_region(PhaseGVN *phase, bool can_reshape) {
  Node *n = in(0);
  if( !n ) return false;
  // Lost control into this guy?  I.e., it became unreachable?
  // Aggressively kill all unreachable code.
  if (can_reshape && n->is_top()) {
    kill_dead_code(this, phase->is_IterGVN());
    return false; // Node is dead.
  }

  if( n->is_Region() && n->as_Region()->is_copy() ) {
    Node *m = n->nonnull_req();
    set_req(0, m);
    return true;
  }
  return false;
}

//------------------------------hash-------------------------------------------
// Hash function over Nodes.
uint Node::hash() const {
  uint sum = 0;
  for( uint i=0; i<_cnt; i++ )  // Add in all inputs
    sum = (sum<<1)-(uintptr_t)in(i);        // Ignore embedded NULLs
  return (sum>>2) + _cnt + Opcode();
}

//------------------------------cmp--------------------------------------------
// Compare special parts of simple Nodes
uint Node::cmp( const Node &n ) const {
  return 1;                     // Must be same
}

//------------------------------rematerialize-----------------------------------
// Should we clone rather than spill this instruction?
bool Node::rematerialize() const {
  if ( is_Mach() )
    return this->as_Mach()->rematerialize();
  else
    return (_flags & Flag_rematerialize) != 0;
}

//------------------------------needs_anti_dependence_check---------------------
// Nodes which use memory without consuming it, hence need antidependences.
bool Node::needs_anti_dependence_check() const {
  if( req() < 2 || (_flags & Flag_needs_anti_dependence_check) == 0 )
    return false;
  else
    return in(1)->bottom_type()->has_memory();
}


// Get an integer constant from a ConNode (or CastIINode).
// Return a default value if there is no apparent constant here.
const TypeInt* Node::find_int_type() const {
  if (this->is_Type()) {
    return this->as_Type()->type()->isa_int();
  } else if (this->is_Con()) {
    assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode");
    return this->bottom_type()->isa_int();
  }
  return NULL;
}

// Get a pointer constant from a ConstNode.
// Returns the constant if it is a pointer ConstNode
// (asserts Opcode() == Op_ConP in debug builds).
intptr_t Node::get_ptr() const {
  assert( Opcode() == Op_ConP, "" );
  return ((ConPNode*)this)->type()->is_ptr()->get_con();
}

// Get a narrow oop constant from a ConNNode.
intptr_t Node::get_narrowcon() const {
  assert( Opcode() == Op_ConN, "" );
  return ((ConNNode*)this)->type()->is_narrowoop()->get_con();
}

// Get a long constant from a ConNode.
// Return a default value if there is no apparent constant here.
const TypeLong* Node::find_long_type() const {
  if (this->is_Type()) {
    return this->as_Type()->type()->isa_long();
  } else if (this->is_Con()) {
    assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode");
    return this->bottom_type()->isa_long();
  }
  return NULL;
}


/**
 * Return a ptr type for nodes which should have it.
 * Dumps the node and asserts in debug builds if no ptr type can be made.
 */
const TypePtr* Node::get_ptr_type() const {
  const TypePtr* tp = this->bottom_type()->make_ptr();
#ifdef ASSERT
  if (tp == NULL) {
    this->dump(1);
    assert((tp != NULL), "unexpected node type");
  }
#endif
  return tp;
}

// Get a double constant from a ConstNode.
// Returns the constant if it is a double ConstNode
jdouble Node::getd() const {
  assert( Opcode() == Op_ConD, "" );
  return ((ConDNode*)this)->type()->is_double_constant()->getd();
}

// Get a float constant from a ConstNode.
// Returns the constant if it is a float ConstNode
jfloat Node::getf() const {
  assert( Opcode() == Op_ConF, "" );
  return ((ConFNode*)this)->type()->is_float_constant()->getf();
}

#ifndef PRODUCT

//----------------------------NotANode----------------------------------------
// Used in debugging code to avoid walking across dead or uninitialized edges.
static inline bool NotANode(const Node* n) {
  if (n == NULL)                   return true;
  if (((intptr_t)n & 1) != 0)      return true;  // uninitialized, etc.
  if (*(address*)n == badAddress)  return true;  // kill by Node::destruct
  return false;
}


//------------------------------find------------------------------------------
// Find a neighbor of this Node with the given _idx
// If idx is negative, find its absolute value, following both _in and _out.
static void find_recur(Compile* C, Node* &result, Node *n, int idx, bool only_ctrl,
                       VectorSet* old_space, VectorSet* new_space ) {
  int node_idx = (idx >= 0) ? idx : -idx;
  if (NotANode(n))  return;  // Gracefully handle NULL, -1, 0xabababab, etc.
  // Contained in new_space or old_space? Check old_arena first since it's mostly empty.
  VectorSet *v = C->old_arena()->contains(n) ? old_space : new_space;
  if( v->test(n->_idx) ) return;
  if( (int)n->_idx == node_idx
      debug_only(|| n->debug_idx() == node_idx) ) {
    if (result != NULL)
      tty->print("find: " INTPTR_FORMAT " and " INTPTR_FORMAT " both have idx==%d\n",
                 (uintptr_t)result, (uintptr_t)n, node_idx);
    result = n;
  }
  v->set(n->_idx);
  for( uint i=0; i<n->len(); i++ ) {
    if( only_ctrl && !(n->is_Region()) && (n->Opcode() != Op_Root) && (i != TypeFunc::Control) ) continue;
    find_recur(C, result, n->in(i), idx, only_ctrl, old_space, new_space );
  }
  // Search along forward edges also:
  if (idx < 0 && !only_ctrl) {
    for( uint j=0; j<n->outcnt(); j++ ) {
      find_recur(C, result, n->raw_out(j), idx, only_ctrl, old_space, new_space );
    }
  }
#ifdef ASSERT
  // Search along debug_orig edges last, checking for cycles
  Node* orig = n->debug_orig();
  if (orig != NULL) {
    do {
      if (NotANode(orig))  break;
      find_recur(C, result, orig, idx, only_ctrl, old_space, new_space );
      orig = orig->debug_orig();
    } while (orig != NULL && orig != n->debug_orig());
  }
#endif //ASSERT
}

// call this from debugger:
Node* find_node(Node* n, int idx) {
  return n->find(idx);
}

//------------------------------find-------------------------------------------
Node* Node::find(int idx) const {
  ResourceArea *area = Thread::current()->resource_area();
  VectorSet old_space(area), new_space(area);
  Node* result = NULL;
  find_recur(Compile::current(), result, (Node*) this, idx, false, &old_space, &new_space );
  return result;
}

//------------------------------find_ctrl--------------------------------------
// Find an ancestor to this node in the control history with given _idx
Node* Node::find_ctrl(int idx) const {
  ResourceArea *area = Thread::current()->resource_area();
  VectorSet old_space(area), new_space(area);
  Node* result = NULL;
  find_recur(Compile::current(), result, (Node*) this, idx, true, &old_space, &new_space );
  return result;
}
#endif



#ifndef PRODUCT

// -----------------------------Name-------------------------------------------
extern const char *NodeClassNames[];
const char *Node::Name() const { return NodeClassNames[Opcode()]; }

// True if every required input of 'n' is NULL (i.e. the node is dead/detached).
static bool is_disconnected(const Node* n) {
  for (uint i = 0; i < n->req(); i++) {
    if (n->in(i) != NULL)  return false;
  }
  return true;
}

#ifdef ASSERT
// Print the debug_orig chain of 'orig', using the tortoise-and-hare trick
// to terminate with "..." if the chain contains a cycle.
static void dump_orig(Node* orig, outputStream *st) {
  Compile* C = Compile::current();
  if (NotANode(orig)) orig = NULL;
  if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
  if (orig == NULL) return;
  st->print(" !orig=");
  Node* fast = orig->debug_orig(); // tortoise & hare algorithm to detect loops
  if (NotANode(fast)) fast = NULL;
  while (orig != NULL) {
    bool discon = is_disconnected(orig); // if discon, print [123] else 123
    if (discon) st->print("[");
    if (!Compile::current()->node_arena()->contains(orig))
      st->print("o");
    st->print("%d", orig->_idx);
    if (discon) st->print("]");
    orig = orig->debug_orig();
    if (NotANode(orig)) orig = NULL;
    if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
    if (orig != NULL) st->print(",");
    if (fast != NULL) {
      // Step fast twice for each single step of orig:
      fast = fast->debug_orig();
      if (NotANode(fast)) fast = NULL;
      if (fast != NULL && fast != orig) {
        fast = fast->debug_orig();
        if (NotANode(fast)) fast = NULL;
      }
      if (fast == orig) {
        st->print("...");
        break;
      }
    }
  }
}

// Record the node this one was cloned from, and honor BreakAtNode if any
// node on the debug_orig chain (walked at most 10 deep) matches.
void Node::set_debug_orig(Node* orig) {
  _debug_orig = orig;
  if (BreakAtNode == 0)  return;
  if (NotANode(orig))  orig = NULL;
  int trip = 10;
  while (orig != NULL) {
    if (orig->debug_idx() == BreakAtNode || (int)orig->_idx == BreakAtNode) {
      tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d orig._idx=%d orig._debug_idx=%d",
                    this->_idx, this->debug_idx(), orig->_idx, orig->debug_idx());
      BREAKPOINT;
    }
    orig = orig->debug_orig();
    if (NotANode(orig))  orig = NULL;
    if (trip-- <= 0)  break;
  }
}
#endif //ASSERT

//------------------------------dump------------------------------------------
// Dump a Node
void Node::dump(const char* suffix, outputStream *st) const {
  Compile* C = Compile::current();
  bool is_new = C->node_arena()->contains(this);
  C->_in_dump_cnt++;
  st->print("%c%d\t%s\t=== ", is_new ? ' ' : 'o', _idx, Name());

  // Dump the required and precedence inputs
  dump_req(st);
  dump_prec(st);
  // Dump the outputs
  dump_out(st);

  if (is_disconnected(this)) {
#ifdef ASSERT
    st->print(" [%d]",debug_idx());
    dump_orig(debug_orig(), st);
#endif
    st->cr();
    C->_in_dump_cnt--;
    return;                     // don't process dead nodes
  }

  // Dump node-specific info
  dump_spec(st);
#ifdef ASSERT
  // Dump the non-reset _debug_idx
  if (Verbose && WizardMode) {
    st->print(" [%d]",debug_idx());
  }
#endif

  const Type *t = bottom_type();

  if (t != NULL && (t->isa_instptr() || t->isa_klassptr())) {
    const TypeInstPtr  *toop = t->isa_instptr();
    const TypeKlassPtr *tkls = t->isa_klassptr();
    ciKlass* klass = toop ? toop->klass() : (tkls ? tkls->klass() : NULL );
    if (klass && klass->is_loaded() && klass->is_interface()) {
      st->print(" Interface:");
    } else if (toop) {
      st->print(" Oop:");
    } else if (tkls) {
      st->print(" Klass:");
    }
    t->dump_on(st);
  } else if (t == Type::MEMORY) {
    st->print(" Memory:");
    MemNode::dump_adr_type(this, adr_type(), st);
  } else if (Verbose || WizardMode) {
    st->print(" Type:");
    if (t) {
      t->dump_on(st);
    } else {
      st->print("no type");
    }
  } else if (t->isa_vect() && this->is_MachSpillCopy()) {
    // Dump MachSpillcopy vector type.
    t->dump_on(st);
  }
  if (is_new) {
    debug_only(dump_orig(debug_orig(), st));
    Node_Notes* nn = C->node_notes_at(_idx);
    if (nn != NULL && !nn->is_clear()) {
      if (nn->jvms() != NULL) {
        st->print(" !jvms:");
        nn->jvms()->dump_spec(st);
      }
    }
  }
  if (suffix) st->print("%s", suffix);
  C->_in_dump_cnt--;
}

//------------------------------dump_req--------------------------------------
void Node::dump_req(outputStream *st) const {
  // Dump the required input edges
  for (uint i = 0; i < req(); i++) {    // For all required inputs
    Node* d = in(i);
    if (d == NULL) {
      st->print("_ ");
    } else if (NotANode(d)) {
      st->print("NotANode ");  // uninitialized, sentinel, garbage, etc.
    } else {
      // 'o' prefix marks nodes from the old (pre-Matcher) arena.
      st->print("%c%d ", Compile::current()->node_arena()->contains(d) ? ' ' : 'o', d->_idx);
    }
  }
}


//------------------------------dump_prec-------------------------------------
void Node::dump_prec(outputStream *st) const {
  // Dump the precedence edges
  int any_prec = 0;
  for (uint i = req(); i < len(); i++) { // For all precedence inputs
    Node* p = in(i);
    if (p != NULL) {
      if (!any_prec++) st->print(" |");  // separator before the first prec edge
      if (NotANode(p)) { st->print("NotANode "); continue; }
      st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    }
  }
}

//------------------------------dump_out--------------------------------------
void Node::dump_out(outputStream *st) const {
  // Delimit the output edges
  st->print(" [[");
  // Dump the output edges
  for (uint i = 0; i < _outcnt; i++) {   // For all outputs
    Node* u = _out[i];
    if (u == NULL) {
      st->print("_ ");
    } else if (NotANode(u)) {
      st->print("NotANode ");
    } else {
      st->print("%c%d ", Compile::current()->node_arena()->contains(u) ? ' ' : 'o', u->_idx);
    }
  }
  st->print("]] ");
}

//------------------------------dump_nodes-------------------------------------
// Breadth-first collect and dump the neighborhood of 'start' to depth |d|;
// d > 0 walks inputs (and dumps deepest-first), d < 0 walks outputs.
static void dump_nodes(const Node* start, int d, bool only_ctrl) {
  Node* s = (Node*)start; // remove const
  if (NotANode(s)) return;

  uint depth = (uint)ABS(d);
  int direction = d;
  Compile* C = Compile::current();
  GrowableArray <Node *> nstack(C->live_nodes());

  nstack.append(s);
  int begin = 0;
  int end = 0;
  for(uint i = 0; i < depth; i++) {
    end = nstack.length();
    for(int j = begin; j < end; j++) {
      Node* tp = nstack.at(j);
      uint limit = direction > 0 ? tp->len() : tp->outcnt();
      for(uint k = 0; k < limit; k++) {
        Node* n = direction > 0 ? tp->in(k) : tp->raw_out(k);

        if (NotANode(n))  continue;
        // do not recurse through top or the root (would reach unrelated stuff)
        if (n->is_Root() || n->is_top())  continue;
        if (only_ctrl && !n->is_CFG()) continue;

        bool on_stack = nstack.contains(n);
        if (!on_stack) {
          nstack.append(n);
        }
      }
    }
    begin = end;
  }
  end = nstack.length();
  if (direction > 0) {
    for(int j = end-1; j >= 0; j--) {
      nstack.at(j)->dump();
    }
  } else {
    for(int j = 0; j < end; j++) {
      nstack.at(j)->dump();
    }
  }
}

//------------------------------dump-------------------------------------------
void Node::dump(int d) const {
  dump_nodes(this, d, false);
}

//------------------------------dump_ctrl--------------------------------------
// Dump a Node's control history to depth
void Node::dump_ctrl(int d) const {
  dump_nodes(this, d, true);
}

// VERIFICATION CODE
// For each input edge to a node (ie - for each Use-Def edge), verify that
// there is a corresponding Def-Use edge.
//------------------------------verify_edges-----------------------------------
void Node::verify_edges(Unique_Node_List &visited) {
  uint i, j, idx;
  int cnt;
  Node *n;

  // Recursive termination test
  if (visited.member(this))  return;
  visited.push(this);

  // Walk over all input edges, checking for correspondence
  for( i = 0; i < len(); i++ ) {
    n = in(i);
    if (n != NULL && !n->is_top()) {
      // Count instances of (Node *)this
      cnt = 0;
      for (idx = 0; idx < n->_outcnt; idx++ ) {
        if (n->_out[idx] == (Node *)this)  cnt++;
      }
      assert( cnt > 0,"Failed to find Def-Use edge."
); 1886 // Check for duplicate edges 1887 // walk the input array downcounting the input edges to n 1888 for( j = 0; j < len(); j++ ) { 1889 if( in(j) == n ) cnt--; 1890 } 1891 assert( cnt == 0,"Mismatched edge count."); 1892 } else if (n == NULL) { 1893 assert(i >= req() || i == 0 || is_Region() || is_Phi(), "only regions or phis have null data edges"); 1894 } else { 1895 assert(n->is_top(), "sanity"); 1896 // Nothing to check. 1897 } 1898 } 1899 // Recursive walk over all input edges 1900 for( i = 0; i < len(); i++ ) { 1901 n = in(i); 1902 if( n != NULL ) 1903 in(i)->verify_edges(visited); 1904 } 1905 } 1906 1907 //------------------------------verify_recur----------------------------------- 1908 static const Node *unique_top = NULL; 1909 1910 void Node::verify_recur(const Node *n, int verify_depth, 1911 VectorSet &old_space, VectorSet &new_space) { 1912 if ( verify_depth == 0 ) return; 1913 if (verify_depth > 0) --verify_depth; 1914 1915 Compile* C = Compile::current(); 1916 1917 // Contained in new_space or old_space? 1918 VectorSet *v = C->node_arena()->contains(n) ? &new_space : &old_space; 1919 // Check for visited in the proper space. Numberings are not unique 1920 // across spaces so we need a separate VectorSet for each space. 
1921 if( v->test_set(n->_idx) ) return; 1922 1923 if (n->is_Con() && n->bottom_type() == Type::TOP) { 1924 if (C->cached_top_node() == NULL) 1925 C->set_cached_top_node((Node*)n); 1926 assert(C->cached_top_node() == n, "TOP node must be unique"); 1927 } 1928 1929 for( uint i = 0; i < n->len(); i++ ) { 1930 Node *x = n->in(i); 1931 if (!x || x->is_top()) continue; 1932 1933 // Verify my input has a def-use edge to me 1934 if (true /*VerifyDefUse*/) { 1935 // Count use-def edges from n to x 1936 int cnt = 0; 1937 for( uint j = 0; j < n->len(); j++ ) 1938 if( n->in(j) == x ) 1939 cnt++; 1940 // Count def-use edges from x to n 1941 uint max = x->_outcnt; 1942 for( uint k = 0; k < max; k++ ) 1943 if (x->_out[k] == n) 1944 cnt--; 1945 assert( cnt == 0, "mismatched def-use edge counts" ); 1946 } 1947 1948 verify_recur(x, verify_depth, old_space, new_space); 1949 } 1950 1951 } 1952 1953 //------------------------------verify----------------------------------------- 1954 // Check Def-Use info for my subgraph 1955 void Node::verify() const { 1956 Compile* C = Compile::current(); 1957 Node* old_top = C->cached_top_node(); 1958 ResourceMark rm; 1959 ResourceArea *area = Thread::current()->resource_area(); 1960 VectorSet old_space(area), new_space(area); 1961 verify_recur(this, -1, old_space, new_space); 1962 C->set_cached_top_node(old_top); 1963 } 1964 #endif 1965 1966 1967 //------------------------------walk------------------------------------------- 1968 // Graph walk, with both pre-order and post-order functions 1969 void Node::walk(NFunc pre, NFunc post, void *env) { 1970 VectorSet visited(Thread::current()->resource_area()); // Setup for local walk 1971 walk_(pre, post, env, visited); 1972 } 1973 1974 void Node::walk_(NFunc pre, NFunc post, void *env, VectorSet &visited) { 1975 if( visited.test_set(_idx) ) return; 1976 pre(*this,env); // Call the pre-order walk function 1977 for( uint i=0; i<_max; i++ ) 1978 if( in(i) ) // Input exists and is not walked? 
1979 in(i)->walk_(pre,post,env,visited); // Walk it with pre & post functions 1980 post(*this,env); // Call the post-order walk function 1981 } 1982 1983 void Node::nop(Node &, void*) {} 1984 1985 //------------------------------Registers-------------------------------------- 1986 // Do we Match on this edge index or not? Generally false for Control 1987 // and true for everything else. Weird for calls & returns. 1988 uint Node::match_edge(uint idx) const { 1989 return idx; // True for other than index 0 (control) 1990 } 1991 1992 static RegMask _not_used_at_all; 1993 // Register classes are defined for specific machines 1994 const RegMask &Node::out_RegMask() const { 1995 ShouldNotCallThis(); 1996 return _not_used_at_all; 1997 } 1998 1999 const RegMask &Node::in_RegMask(uint) const { 2000 ShouldNotCallThis(); 2001 return _not_used_at_all; 2002 } 2003 2004 //============================================================================= 2005 //----------------------------------------------------------------------------- 2006 void Node_Array::reset( Arena *new_arena ) { 2007 _a->Afree(_nodes,_max*sizeof(Node*)); 2008 _max = 0; 2009 _nodes = NULL; 2010 _a = new_arena; 2011 } 2012 2013 //------------------------------clear------------------------------------------ 2014 // Clear all entries in _nodes to NULL but keep storage 2015 void Node_Array::clear() { 2016 Copy::zero_to_bytes( _nodes, _max*sizeof(Node*) ); 2017 } 2018 2019 //----------------------------------------------------------------------------- 2020 void Node_Array::grow( uint i ) { 2021 if( !_max ) { 2022 _max = 1; 2023 _nodes = (Node**)_a->Amalloc( _max * sizeof(Node*) ); 2024 _nodes[0] = NULL; 2025 } 2026 uint old = _max; 2027 while( i >= _max ) _max <<= 1; // Double to fit 2028 _nodes = (Node**)_a->Arealloc( _nodes, old*sizeof(Node*),_max*sizeof(Node*)); 2029 Copy::zero_to_bytes( &_nodes[old], (_max-old)*sizeof(Node*) ); 2030 } 2031 2032 
//-----------------------------------------------------------------------------
// Insert node n at index i, shifting the tail of the array up by one slot.
// Grows the backing store first if the last slot is already occupied.
void Node_Array::insert( uint i, Node *n ) {
  if( _nodes[_max-1] ) grow(_max);      // Get more space if full
  Copy::conjoint_words_to_higher((HeapWord*)&_nodes[i], (HeapWord*)&_nodes[i+1], ((_max-i-1)*sizeof(Node*)));
  _nodes[i] = n;
}

//-----------------------------------------------------------------------------
// Remove the entry at index i, shifting the tail down one slot and
// clearing the vacated last slot.
void Node_Array::remove( uint i ) {
  Copy::conjoint_words_to_lower((HeapWord*)&_nodes[i+1], (HeapWord*)&_nodes[i], ((_max-i-1)*sizeof(Node*)));
  _nodes[_max-1] = NULL;
}

//-----------------------------------------------------------------------------
// Sort all _max slots (including any NULL entries) with the given
// qsort-style comparator.
void Node_Array::sort( C_sort_func_t func) {
  qsort( _nodes, _max, sizeof( Node* ), func );
}

//-----------------------------------------------------------------------------
// Print index and node for every non-NULL slot (debug builds only).
void Node_Array::dump() const {
#ifndef PRODUCT
  for( uint i = 0; i < _max; i++ ) {
    Node *nn = _nodes[i];
    if( nn != NULL ) {
      tty->print("%5d--> ",i); nn->dump();
    }
  }
#endif
}

//--------------------------is_iteratively_computed------------------------------
// Operation appears to be iteratively computed (such as an induction variable)
// It is possible for this operation to return false for a loop-varying
// value, if it appears (by local graph inspection) to be computed by a simple conditional.
bool Node::is_iteratively_computed() {
  if (ideal_reg()) { // does operation have a result register?
    // Look for a Phi among our inputs that in turn has this node as one of
    // its inputs, i.e. a this -> Phi -> this cycle.
    for (uint i = 1; i < req(); i++) {
      Node* n = in(i);
      if (n != NULL && n->is_Phi()) {
        for (uint j = 1; j < n->req(); j++) {
          if (n->in(j) == this) {
            return true;
          }
        }
      }
    }
  }
  return false;
}

//--------------------------find_similar------------------------------
// Return a node with opcode "opc" and same inputs as "this" if one can
// be found; Otherwise return NULL;
// Candidates are discovered by scanning the uses of our first input.
// NOTE(review): 'this' is itself among those uses, so when opc matches our
// own opcode this can return 'this' — callers appear to tolerate that; verify.
Node* Node::find_similar(int opc) {
  if (req() >= 2) {
    Node* def = in(1);
    if (def && def->outcnt() >= 2) {
      for (DUIterator_Fast dmax, i = def->fast_outs(dmax); i < dmax; i++) {
        Node* use = def->fast_out(i);
        if (use->Opcode() == opc &&
            use->req() == req()) {
          // Compare all inputs pairwise; j reaches use->req() only on a
          // complete match.
          uint j;
          for (j = 0; j < use->req(); j++) {
            if (use->in(j) != in(j)) {
              break;
            }
          }
          if (j == use->req()) {
            return use;
          }
        }
      }
    }
  }
  return NULL;
}


//--------------------------unique_ctrl_out------------------------------
// Return the unique control out if only one. Null if none or more than one.
Node* Node::unique_ctrl_out() {
  Node* found = NULL;
  for (uint i = 0; i < outcnt(); i++) {
    Node* use = raw_out(i);
    // Only CFG users count; ignore a self-reference.
    if (use->is_CFG() && use != this) {
      if (found != NULL) return NULL; // more than one CFG user
      found = use;
    }
  }
  return found;
}

// Make c the control input (slot 0) if there is none yet; otherwise, if c is
// not already the control input, record it as a precedence edge.
void Node::ensure_control_or_add_prec(Node* c) {
  if (in(0) == NULL) {
    set_req(0, c);
  } else if (in(0) != c) {
    add_prec(c);
  }
}

//=============================================================================
//------------------------------yank-------------------------------------------
// Find and remove
void Node_List::yank( Node *n ) {
  uint i;
  for( i = 0; i < _cnt; i++ )
    if( _nodes[i] == n )
      break;

  if( i < _cnt )
    // Unordered removal: fill the hole with the last element.
    _nodes[i] = _nodes[--_cnt];
}

//------------------------------dump-------------------------------------------
// Print index and node for every non-NULL entry (debug builds only).
void Node_List::dump() const {
#ifndef PRODUCT
  for( uint i = 0; i < _cnt; i++ )
    if( _nodes[i] ) {
      tty->print("%5d--> ",i);
      _nodes[i]->dump();
    }
#endif
}

// Print just the node indices on one line, "NULL" for empty entries.
void Node_List::dump_simple() const {
#ifndef PRODUCT
  for( uint i = 0; i < _cnt; i++ )
    if( _nodes[i] ) {
      tty->print(" %d", _nodes[i]->_idx);
    } else {
      tty->print(" NULL");
    }
#endif
}

//=============================================================================
//------------------------------remove-----------------------------------------
// Remove n from the worklist, if present: overwrite its slot with the popped
// last element and clear its membership bit.  The membership bitmap and the
// list must agree — hence ShouldNotReachHere if the bit is set but n is absent.
void Unique_Node_List::remove( Node *n ) {
  if( _in_worklist[n->_idx] ) {
    for( uint i = 0; i < size(); i++ )
      if( _nodes[i] == n ) {
        map(i,Node_List::pop());
        _in_worklist >>= n->_idx;
        return;
      }
    ShouldNotReachHere();
  }
}

//-----------------------remove_useless_nodes----------------------------------
// Remove useless nodes from worklist
void Unique_Node_List::remove_useless_nodes(VectorSet &useful) {

  for( uint i = 0; i < size(); ++i ) {
    Node *n = at(i);
    assert( n != NULL, "Did not expect null entries in worklist");
    if( ! useful.test(n->_idx) ) {
      // Not marked useful: drop the membership bit and replace this slot
      // with the popped last element.
      _in_worklist >>= n->_idx;
      map(i,Node_List::pop());
      // Node *replacement = Node_List::pop();
      // if( i != size() ) { // Check if removing last entry
      //   _nodes[i] = replacement;
      // }
      --i;  // Visit popped node
      // If it was last entry, loop terminates since size() was also reduced
    }
  }
}

//=============================================================================
// Double the stack's capacity, preserving the current contents and the
// distance of _inode_top from the base.
void Node_Stack::grow() {
  size_t old_top = pointer_delta(_inode_top,_inodes,sizeof(INode)); // save _top
  size_t old_max = pointer_delta(_inode_max,_inodes,sizeof(INode));
  size_t max = old_max << 1;             // max * 2
  _inodes = REALLOC_ARENA_ARRAY(_a, INode, _inodes, old_max, max);
  _inode_max = _inodes + max;
  _inode_top = _inodes + old_top;        // restore _top
}

// Node_Stack is used to map nodes.
2212 Node* Node_Stack::find(uint idx) const { 2213 uint sz = size(); 2214 for (uint i=0; i < sz; i++) { 2215 if (idx == index_at(i) ) 2216 return node_at(i); 2217 } 2218 return NULL; 2219 } 2220 2221 //============================================================================= 2222 uint TypeNode::size_of() const { return sizeof(*this); } 2223 #ifndef PRODUCT 2224 void TypeNode::dump_spec(outputStream *st) const { 2225 if( !Verbose && !WizardMode ) { 2226 // standard dump does this in Verbose and WizardMode 2227 st->print(" #"); _type->dump_on(st); 2228 } 2229 } 2230 #endif 2231 uint TypeNode::hash() const { 2232 return Node::hash() + _type->hash(); 2233 } 2234 uint TypeNode::cmp( const Node &n ) const 2235 { return !Type::cmp( _type, ((TypeNode&)n)._type ); } 2236 const Type *TypeNode::bottom_type() const { return _type; } 2237 const Type *TypeNode::Value( PhaseTransform * ) const { return _type; } 2238 2239 //------------------------------ideal_reg-------------------------------------- 2240 uint TypeNode::ideal_reg() const { 2241 return _type->ideal_reg(); 2242 }