/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_PHASEX_HPP
#define SHARE_OPTO_PHASEX_HPP

#include "libadt/dict.hpp"
#include "libadt/vectset.hpp"
#include "memory/resourceArea.hpp"
#include "opto/memnode.hpp"
#include "opto/node.hpp"
#include "opto/phase.hpp"
#include "opto/type.hpp"

class Compile;
class ConINode;
class ConLNode;
class Node;
class Type;
class PhaseTransform;
class PhaseGVN;
class PhaseIterGVN;
class PhaseCCP;
class PhasePeephole;
class PhaseRegAlloc;


//-----------------------------------------------------------------------------
// Expandable closed hash-table of nodes, initialized to NULL.
// Note that the constructor just zeros things.
// Storage is reclaimed when the Arena's lifetime is over.
class NodeHash : public StackObj {
protected:
  Arena *_a;                      // Arena to allocate in
  uint   _max;                    // Size of table (power of 2)
  uint   _inserts;                // For grow and debug, count of hash_inserts
  uint   _insert_limit;           // 'grow' when _inserts reaches _insert_limit
  Node **_table;                  // Hash table of Node pointers
  Node  *_sentinel;               // Replaces deleted entries in hash table

public:
  NodeHash(uint est_max_size);
  NodeHash(Arena *arena, uint est_max_size);
  NodeHash(NodeHash *use_this_state);
#ifdef ASSERT
  ~NodeHash();                    // Unlock all nodes upon destruction of table.
  void operator=(const NodeHash&); // Unlock all nodes upon replacement of table.
#endif
  Node  *hash_find(const Node*);  // Find an equivalent version in hash table
  Node  *hash_find_insert(Node*); // If not in table insert, else return found node
  void   hash_insert(Node*);      // Insert into hash table
  bool   hash_delete(const Node*); // Replace with _sentinel in hash table
  void   check_grow() {
    _inserts++;
    if( _inserts == _insert_limit ) { grow(); }
    assert( _inserts <= _insert_limit, "hash table overflow" );
    assert( _inserts < _max, "hash table overflow" );
  }
  static uint round_up(uint);     // Round up to nearest power of 2
  void   grow();                  // Grow _table to next power of 2 and rehash
  // Return 75% of _max, rounded up.
  uint   insert_limit() const { return _max - (_max>>2); }

  void   clear();                 // Set all entries to NULL, keep storage.
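
  // Illustrative value-numbering sequence (a sketch, not a prescribed API
  // order; 'n' stands for a hypothetical candidate node, and PhaseValues
  // below wraps these calls):
  //
  //   Node* prior = hash_find_insert(n); // probe; inserts n if absent
  //   if (prior != NULL && prior != n) {
  //     // An equivalent node was already in the table; callers typically
  //     // keep 'prior' and discard 'n' rather than retaining both.
  //   }
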
  // Size of hash table
  uint   size() const { return _max; }
  // Return Node* at index in table
  Node  *at(uint table_index) {
    assert(table_index < _max, "Must be within table");
    return _table[table_index];
  }

  void   remove_useless_nodes(VectorSet &useful); // replace with sentinel
  void   replace_with(NodeHash* nh);
  void   check_no_speculative_types(); // Check no speculative part for type nodes in table

  Node  *sentinel() { return _sentinel; }

#ifndef PRODUCT
  Node  *find_index(uint idx);    // For debugging
  void   dump();                  // For debugging, dump statistics
  uint   _grows;                  // For debugging, count of table grow()s
  uint   _look_probes;            // For debugging, count of hash probes
  uint   _lookup_hits;            // For debugging, count of hash_finds that hit
  uint   _lookup_misses;          // For debugging, count of hash_finds that missed
  uint   _insert_probes;          // For debugging, count of hash probes for inserts
  uint   _delete_probes;          // For debugging, count of hash probes for deletes
  uint   _delete_hits;            // For debugging, count of deletes that hit
  uint   _delete_misses;          // For debugging, count of deletes that missed
  uint   _total_inserts;          // For debugging, total inserts into hash table
  uint   _total_insert_probes;    // For debugging, total probes while inserting
#endif
};


//-----------------------------------------------------------------------------
// Map dense integer indices to Types. Uses classic doubling-array trick.
// Abstractly provides an infinite array of Type*'s, initialized to NULL.
// Note that the constructor just zeros things, and since I use Arena
// allocation I do not need a destructor to reclaim storage.
// Despite the general name, this class is customized for use by PhaseTransform.
class Type_Array : public StackObj {
  Arena *_a;                      // Arena to allocate in
  uint   _max;
  const Type **_types;
  void grow( uint i );            // Grow array to fit index i
  const Type *operator[] ( uint i ) const // Lookup, or NULL for not mapped
  { return (i<_max) ? _types[i] : (Type*)NULL; }
  friend class PhaseTransform;
public:
  Type_Array(Arena *a) : _a(a), _max(0), _types(0) {}
  Type_Array(Type_Array *ta) : _a(ta->_a), _max(ta->_max), _types(ta->_types) { }
  const Type *fast_lookup(uint i) const { assert(i<_max, "oob"); return _types[i]; }
  // Extend the mapping: index i maps to Type *n.
  void map( uint i, const Type *n ) { if( i>=_max ) grow(i); _types[i] = n; }
  uint Size() const { return _max; }
#ifndef PRODUCT
  void dump() const;
#endif
};
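
// A minimal sketch of the Type_Array contract (hypothetical indices and
// types; operator[] is private, so these reads are as seen from
// PhaseTransform, which is a friend):
//
//   Type_Array types(arena);
//   types.map(17, TypeInt::INT); // grows the backing store to cover index 17
//   types[17];                   // -> TypeInt::INT
//   types[99];                   // -> NULL; never mapped, reads as NULL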


//------------------------------PhaseRemoveUseless-----------------------------
// Remove useless nodes from GVN hash-table, worklist, and graph
class PhaseRemoveUseless : public Phase {
protected:
  Unique_Node_List _useful;       // Nodes reachable from root
                                  // list is allocated from current resource area
public:
  PhaseRemoveUseless(PhaseGVN *gvn, Unique_Node_List *worklist, PhaseNumber phase_num = Remove_Useless);

  Unique_Node_List *get_useful() { return &_useful; }
};

//------------------------------PhaseRenumberLive------------------------------
// Phase that first performs a PhaseRemoveUseless, then renumbers compiler
// structures accordingly.
class PhaseRenumberLive : public PhaseRemoveUseless {
protected:
  Type_Array _new_type_array;     // Storage for the updated type information.
  GrowableArray<int> _old2new_map;
  Node_List _delayed;
  bool _is_pass_finished;
  uint _live_node_count;

  int update_embedded_ids(Node* n);
  int new_index(int old_idx);

public:
  PhaseRenumberLive(PhaseGVN* gvn,
                    Unique_Node_List* worklist, Unique_Node_List* new_worklist,
                    PhaseNumber phase_num = Remove_Useless_And_Renumber_Live);
};


//------------------------------PhaseTransform---------------------------------
// Phases that analyze, then transform. Constructing the Phase object does any
// global or slow analysis. The results are cached later for a fast
// transformation pass. When the Phase object is deleted the cached analysis
// results are deleted.
class PhaseTransform : public Phase {
protected:
  Arena*     _arena;
  Node_List  _nodes;              // Map old node indices to new nodes.
  Type_Array _types;              // Map old node indices to Types.

  // ConNode caches:
  enum { _icon_min = -1 * HeapWordSize,
         _icon_max = 16 * HeapWordSize,
         _lcon_min = _icon_min,
         _lcon_max = _icon_max,
         _zcon_max = (uint)T_CONFLICT
  };
  ConINode* _icons[_icon_max - _icon_min + 1]; // cached jint constant nodes
  ConLNode* _lcons[_lcon_max - _lcon_min + 1]; // cached jlong constant nodes
  ConNode*  _zcons[_zcon_max + 1];             // cached is_zero_type nodes
  void init_con_caches();

  // Support both int and long caches because either might be an intptr_t,
  // so they show up frequently in address computations.

public:
  PhaseTransform( PhaseNumber pnum );
  PhaseTransform( Arena *arena, PhaseNumber pnum );
  PhaseTransform( PhaseTransform *phase, PhaseNumber pnum );

  Arena* arena() { return _arena; }
  Type_Array& types() { return _types; }
  void replace_types(Type_Array new_types) {
    _types = new_types;
  }
  // _nodes is used in varying ways by subclasses, which define local accessors
  uint nodes_size() {
    return _nodes.size();
  }

public:
  // Get a previously recorded type for the node n.
  // This type must already have been recorded.
  // If you want the type of a very new (untransformed) node,
  // you must use type_or_null, and test the result for NULL.
  const Type* type(const Node* n) const {
    assert(_pnum != Ideal_Loop, "should not be used from PhaseIdealLoop");
    assert(n != NULL, "must not be null");
    const Type* t = _types.fast_lookup(n->_idx);
    assert(t != NULL, "must set before get");
    return t;
  }
  // Get a previously recorded type for the node n,
  // or else return NULL if there is none.
  const Type* type_or_null(const Node* n) const {
    assert(_pnum != Ideal_Loop, "should not be used from PhaseIdealLoop");
    return _types.fast_lookup(n->_idx);
  }
  // Record a type for a node.
  void set_type(const Node* n, const Type *t) {
    assert(t != NULL, "type must not be null");
    _types.map(n->_idx, t);
  }
  // Record an initial type for a node, the node's bottom type.
  void set_type_bottom(const Node* n) {
    // Use this for initialization when bottom_type() (or better) is not handy.
    // Usually the initialization should be to n->Value(this) instead,
    // or a hand-optimized value like Type::MEMORY or Type::CONTROL.
    assert(_types[n->_idx] == NULL, "must set the initial type just once");
    _types.map(n->_idx, n->bottom_type());
  }
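
  // Usage sketch of the recording discipline above ('n' and 'm' are
  // hypothetical nodes): set before get, and probe brand-new nodes with
  // type_or_null rather than type().
  //
  //   set_type(n, n->Value(this));      // record the analysis result first
  //   const Type* t = type(n);          // OK: asserts if nothing recorded
  //   const Type* u = type_or_null(m);  // safe for an untransformed node
  //   if (u == NULL) { /* m has no recorded type yet */ }
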
  // Make sure the types array is big enough to record a size for the node n.
  // (In product builds, we never want to do range checks on the types array!)
  void ensure_type_or_null(const Node* n) {
    if (n->_idx >= _types.Size())
      _types.map(n->_idx, NULL);  // Grow the types array as needed.
  }

  // Utility functions:
  const TypeInt*  find_int_type( Node* n);
  const TypeLong* find_long_type(Node* n);
  jint  find_int_con( Node* n, jint value_if_unknown) {
    const TypeInt* t = find_int_type(n);
    return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
  }
  jlong find_long_con(Node* n, jlong value_if_unknown) {
    const TypeLong* t = find_long_type(n);
    return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
  }

  // Make an idealized constant, i.e., one of ConINode, ConPNode, ConFNode, etc.
  // Same as transform(ConNode::make(t)).
  ConNode* makecon(const Type* t);
  virtual ConNode* uncached_makecon(const Type* t)  // override in PhaseValues
  { ShouldNotCallThis(); return NULL; }

  // Fast int or long constant. Same as TypeInt::make(i) or TypeLong::make(l).
  ConINode* intcon(jint i);
  ConLNode* longcon(jlong l);

  // Fast zero or null constant. Same as makecon(Type::get_zero_type(bt)).
  ConNode* zerocon(BasicType bt);
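
  // Constant-factory sketch (hypothetical calls; small values near zero are
  // served from the ConNode caches above, avoiding repeated allocation):
  //
  //   ConINode* one = intcon(1);                  // within the cache range
  //   ConLNode* big = longcon(CONST64(1) << 40);  // outside the cache range
  //   ConNode*  nil = zerocon(T_OBJECT);          // null pointer constant
  //   ConNode*  c42 = makecon(TypeInt::make(42)); // general constant maker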

  // Return a node which computes the same function as this node, but
  // in a faster or cheaper fashion.
  virtual Node *transform( Node *n ) = 0;

  // Return whether two Nodes are equivalent.
  // Must not be recursive, since the recursive version is built from this.
  // For pessimistic optimizations this is simply pointer equivalence.
  bool eqv(const Node* n1, const Node* n2) const { return n1 == n2; }

  // For pessimistic passes, the return type must monotonically narrow.
  // For optimistic passes, the return type must monotonically widen.
  // It is possible to get into a "death march" in either type of pass,
  // where the types are continually moving but it will take 2**31 or
  // more steps to converge. This doesn't happen on most normal loops.
  //
  // Here is an example of a deadly loop for an optimistic pass, along
  // with a partial trace of inferred types:
  //    x = phi(0,x'); L: x' = x+1; if (x' >= 0) goto L;
  //    0                 1                join([0..max], 1)
  //    [0..1]            [1..2]           join([0..max], [1..2])
  //    [0..2]            [1..3]           join([0..max], [1..3])
  //      ...               ...              ...
  //    [0..max]          [min]u[1..max]   join([0..max], [min..max])
  //    [0..max]                       ==> fixpoint
  // We would have proven, the hard way, that the iteration space is all
  // non-negative ints, with the loop terminating due to 32-bit overflow.
  //
  // Here is the corresponding example for a pessimistic pass:
  //    x = phi(0,x'); L: x' = x-1; if (x' >= 0) goto L;
  //    int               int              join([0..max], int)
  //    [0..max]          [-1..max-1]      join([0..max], [-1..max-1])
  //    [0..max-1]        [-1..max-2]      join([0..max], [-1..max-2])
  //      ...               ...              ...
  //    [0..1]            [-1..0]          join([0..max], [-1..0])
  //    0                 -1               join([0..max], -1)
  //    0                              ==  fixpoint
  // We would have proven, the hard way, that the iteration space is {0}.
  // (Usually, other optimizations will make the "if (x >= 0)" fold up
  // before we get into trouble. But not always.)
  //
  // It's a pleasant thing to observe that the pessimistic pass
  // will make short work of the optimistic pass's deadly loop,
  // and vice versa. That is a good example of the complementary
  // purposes of the CCP (optimistic) vs. GVN (pessimistic) phases.
  //
  // In any case, only widen or narrow a few times before going to the
  // correct flavor of top or bottom.
  //
  // This call only needs to be made once as the data flows around any
  // given cycle. We do it at Phis, and nowhere else.
  // The types presented are the new type of a phi (computed by PhiNode::Value)
  // and the previously computed type, last time the phi was visited.
  //
  // The third argument is an upper limit for the saturated value,
  // if the phase wishes to widen the new_type.
  // If the phase is narrowing, the old type provides a lower limit.
  // Caller guarantees that old_type and new_type are no higher than limit_type.
  virtual const Type* saturate(const Type* new_type, const Type* old_type,
                               const Type* limit_type) const
  { ShouldNotCallThis(); return NULL; }

  // Delayed node rehash if this is an IGVN phase
  virtual void igvn_rehash_node_delayed(Node* n) {}

  // true if CFG node d dominates CFG node n
  virtual bool is_dominator(Node *d, Node *n) { fatal("unimplemented for this pass"); return false; }

#ifndef PRODUCT
  void dump_old2new_map() const;
  void dump_new( uint new_lidx ) const;
  void dump_types() const;
  void dump_nodes_and_types(const Node *root, uint depth, bool only_ctrl = true);
  void dump_nodes_and_types_recur( const Node *n, uint depth, bool only_ctrl, VectorSet &visited);

  uint   _count_progress;         // For profiling, count transforms that make progress
  void   set_progress()          { ++_count_progress; assert( allow_progress(), "No progress allowed during verification"); }
  void   clear_progress()        { _count_progress = 0; }
  uint   made_progress() const   { return _count_progress; }

  uint   _count_transforms;       // For profiling, count transforms performed
  void   set_transforms()        { ++_count_transforms; }
  void   clear_transforms()      { _count_transforms = 0; }
  uint   made_transforms() const { return _count_transforms; }

  bool   _allow_progress;         // progress not allowed during verification pass
  void   set_allow_progress(bool allow) { _allow_progress = allow; }
  bool   allow_progress()        { return _allow_progress; }
#endif
};

//------------------------------PhaseValues------------------------------------
// Phase infrastructure to support values
class PhaseValues : public PhaseTransform {
protected:
  NodeHash _table;                // Hash table for value-numbering

public:
  PhaseValues( Arena *arena, uint est_max_size );
  PhaseValues( PhaseValues *pt );
  PhaseValues( PhaseValues *ptv, const char *dummy );
  NOT_PRODUCT( ~PhaseValues(); )
  virtual PhaseIterGVN *is_IterGVN() { return 0; }

  // Some Ideal and other transforms delete --> modify --> insert values
  bool   hash_delete(Node *n)     { return _table.hash_delete(n); }
  void   hash_insert(Node *n)     { _table.hash_insert(n); }
  Node  *hash_find_insert(Node *n){ return _table.hash_find_insert(n); }
  Node  *hash_find(const Node *n) { return _table.hash_find(n); }

  // Used after parsing to eliminate values that are no longer in program
  void   remove_useless_nodes(VectorSet &useful) {
    _table.remove_useless_nodes(useful);
    // this may invalidate cached cons so reset the cache
    init_con_caches();
  }

  virtual ConNode* uncached_makecon(const Type* t);  // override from PhaseTransform

  virtual const Type* saturate(const Type* new_type, const Type* old_type,
                               const Type* limit_type) const
  { return new_type; }
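
  // The identity saturate() above is the pessimistic default: GVN types only
  // narrow, so no damping is needed. The iterative and optimistic subclasses
  // override it (PhaseIterGVN keeps old_type when new_type is only a slight
  // improvement; PhaseCCP widens toward limit_type), as declared below.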

#ifndef PRODUCT
  uint   _count_new_values;       // For profiling, count new values produced
  void    inc_new_values()        { ++_count_new_values; }
  void    clear_new_values()      { _count_new_values = 0; }
  uint    made_new_values() const { return _count_new_values; }
#endif
};


//------------------------------PhaseGVN---------------------------------------
// Phase for performing local, pessimistic GVN-style optimizations.
class PhaseGVN : public PhaseValues {
protected:
  bool is_dominator_helper(Node *d, Node *n, bool linear_only);

public:
  PhaseGVN( Arena *arena, uint est_max_size ) : PhaseValues( arena, est_max_size ) {}
  PhaseGVN( PhaseGVN *gvn ) : PhaseValues( gvn ) {}
  PhaseGVN( PhaseGVN *gvn, const char *dummy ) : PhaseValues( gvn, dummy ) {}

  // Return a node which computes the same function as this node, but
  // in a faster or cheaper fashion.
  Node  *transform( Node *n );
  Node  *transform_no_reclaim( Node *n );
  virtual void record_for_igvn(Node *n) {
    C->record_for_igvn(n);
  }

  void replace_with(PhaseGVN* gvn) {
    _table.replace_with(&gvn->_table);
    _types = gvn->_types;
  }

  bool is_dominator(Node *d, Node *n) { return is_dominator_helper(d, n, true); }

  // Helper to call Node::Ideal() and BarrierSetC2::ideal_node().
  Node* apply_ideal(Node* i, bool can_reshape);

  // Helper to call Node::Identity() and BarrierSetC2::identity_node().
  Node* apply_identity(Node* n);

  // Check for a simple dead loop when a data node references itself.
  DEBUG_ONLY(void dead_loop_check(Node *n);)
};

//------------------------------PhaseIterGVN-----------------------------------
// Phase for iteratively performing local, pessimistic GVN-style optimizations
// and ideal transformations on the graph.
class PhaseIterGVN : public PhaseGVN {
private:
  bool _delay_transform;          // When true simply register the node when calling transform
                                  // instead of actually optimizing it

  // Idealize old Node 'n' with respect to its inputs and its value
  virtual Node *transform_old( Node *a_node );

  // Subsume users of node 'old' into node 'nn'
  void subsume_node( Node *old, Node *nn );

  Node_Stack _stack;              // Stack used to avoid recursion

protected:

  // Warm up hash table, type table and initial worklist
  void init_worklist( Node *a_root );

  virtual const Type* saturate(const Type* new_type, const Type* old_type,
                               const Type* limit_type) const;
  // Usually returns new_type. Returns old_type if new_type is only a slight
  // improvement, such that it would take many (>>10) steps to reach 2**32.

public:
  PhaseIterGVN( PhaseIterGVN *igvn );     // Used by CCP constructor
  PhaseIterGVN( PhaseGVN *gvn );          // Used after Parser
  PhaseIterGVN( PhaseIterGVN *igvn, const char *dummy ); // Used after +VerifyOpto

  // Idealize new Node 'n' with respect to its inputs and its value
  virtual Node *transform( Node *a_node );
  virtual void record_for_igvn(Node *n) { }

  virtual PhaseIterGVN *is_IterGVN() { return this; }

  Unique_Node_List _worklist;     // Iterative worklist
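
  // Typical driver sequence after parsing (a sketch only; the exact call
  // sites live in Compile, and 'initial_gvn' is a hypothetical PhaseGVN*):
  //
  //   PhaseIterGVN igvn(initial_gvn); // inherit hash table, types, worklist
  //   igvn.optimize();                // run the worklist to a fixed point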

  // Given def-use info and an initial worklist, apply Node::Ideal,
  // Node::Value, Node::Identity, hash-based value numbering, Node::Ideal_DU
  // and dominator info to a fixed point.
  void optimize();

#ifndef PRODUCT
  void trace_PhaseIterGVN(Node* n, Node* nn, const Type* old_type);
  void init_verifyPhaseIterGVN();
  void verify_PhaseIterGVN();
#endif

#ifdef ASSERT
  void dump_infinite_loop_info(Node* n);
  void trace_PhaseIterGVN_verbose(Node* n, int num_processed);
#endif

  // Register a new node with the iter GVN pass without transforming it.
  // Used when we need to restructure a Region/Phi area and all the Regions
  // and Phis need to complete this one big transform before any other
  // transforms can be triggered on the region.
  // Optional 'orig' is an earlier version of this node.
  // It is significant only for debugging and profiling.
  Node* register_new_node_with_optimizer(Node* n, Node* orig = NULL);

  // Kill a globally dead Node. All uses are also globally dead and are
  // aggressively trimmed.
  void remove_globally_dead_node( Node *dead );

  // Kill all inputs to a dead node, recursively making more dead nodes.
  // The Node must be dead locally, i.e., have no uses.
  void remove_dead_node( Node *dead ) {
    assert(dead->outcnt() == 0 && !dead->is_top(), "node must be dead");
    remove_globally_dead_node(dead);
  }

  // Add users of 'n' to worklist
  void add_users_to_worklist0( Node *n );
  void add_users_to_worklist ( Node *n );

  // Replace old node with new one.
  void replace_node( Node *old, Node *nn ) {
    add_users_to_worklist(old);
    hash_delete(old);             // Yank from hash before hacking edges
    subsume_node(old, nn);
  }

  // Delayed node rehash: remove a node from the hash table and rehash it during
  // next optimizing pass
  void rehash_node_delayed(Node* n) {
    hash_delete(n);
    _worklist.push(n);
  }

  void igvn_rehash_node_delayed(Node* n) {
    rehash_node_delayed(n);
  }

  // Replace ith edge of "n" with "in"
  void replace_input_of(Node* n, int i, Node* in) {
    rehash_node_delayed(n);
    n->set_req(i, in);
  }

  // Delete ith edge of "n"
  void delete_input_of(Node* n, int i) {
    rehash_node_delayed(n);
    n->del_req(i);
  }
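
  // Illustrative edge surgery during IGVN (a sketch; 'igvn', 'use', and
  // 'new_in' are hypothetical). The unhash/edit/re-enqueue steps are
  // bundled by the helpers above:
  //
  //   igvn->replace_input_of(use, 2, new_in); // unhash, set_req, re-enqueue
  //   // equivalent to:
  //   //   igvn->rehash_node_delayed(use);
  //   //   use->set_req(2, new_in);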

  bool delay_transform() const { return _delay_transform; }

  void set_delay_transform(bool delay) {
    _delay_transform = delay;
  }

  // Clone loop predicates. Defined in loopTransform.cpp.
  Node* clone_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check);
  // Create a new if below new_entry for the predicate to be cloned
  ProjNode* create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
                                        Deoptimization::DeoptReason reason,
                                        int opcode);

  void remove_speculative_types();
  void check_no_speculative_types() {
    _table.check_no_speculative_types();
  }

  bool is_dominator(Node *d, Node *n) { return is_dominator_helper(d, n, false); }

#ifndef PRODUCT
protected:
  // Sub-quadratic implementation of VerifyIterativeGVN.
  julong _verify_counter;
  julong _verify_full_passes;
  enum { _verify_window_size = 30 };
  Node* _verify_window[_verify_window_size];
  void verify_step(Node* n);
#endif
};

//------------------------------PhaseCCP---------------------------------------
// Phase for performing global Conditional Constant Propagation.
// Should be replaced with combined CCP & GVN someday.
class PhaseCCP : public PhaseIterGVN {
  // Non-recursive. Use analysis to transform single Node.
  virtual Node *transform_once( Node *n );

public:
  PhaseCCP( PhaseIterGVN *igvn ); // Compute conditional constants
  NOT_PRODUCT( ~PhaseCCP(); )

  // Worklist algorithm identifies constants
  void analyze();
  // Recursive traversal of program. Uses analysis to modify program.
  virtual Node *transform( Node *n );
  // Do any transformation after analysis
  void do_transform();

  virtual const Type* saturate(const Type* new_type, const Type* old_type,
                               const Type* limit_type) const;
  // Returns new_type->widen(old_type), which increments the widen bits until
  // giving up with TypeInt::INT or TypeLong::LONG.
  // Result is clipped to limit_type if necessary.

#ifndef PRODUCT
  static uint _total_invokes;     // For profiling, count invocations
  void    inc_invokes()           { ++PhaseCCP::_total_invokes; }

  static uint _total_constants;   // For profiling, count constants found
  uint    _count_constants;
  void    clear_constants()       { _count_constants = 0; }
  void    inc_constants()         { ++_count_constants; }
  uint    count_constants() const { return _count_constants; }

  static void print_statistics();
#endif
};


//------------------------------PhasePeephole----------------------------------
// Phase for performing peephole optimizations on register allocated basic blocks.
class PhasePeephole : public PhaseTransform {
  PhaseRegAlloc *_regalloc;
  PhaseCFG      &_cfg;
  // Recursive traversal of program. Pure function is unused in this phase
  virtual Node *transform( Node *n );

public:
  PhasePeephole( PhaseRegAlloc *regalloc, PhaseCFG &cfg );
  NOT_PRODUCT( ~PhasePeephole(); )

  // Do any transformation after analysis
  void do_transform();

#ifndef PRODUCT
  static uint _total_peepholes;   // For profiling, count peephole rules applied
  uint    _count_peepholes;
  void    clear_peepholes()       { _count_peepholes = 0; }
  void    inc_peepholes()         { ++_count_peepholes; }
  uint    count_peepholes() const { return _count_peepholes; }

  static void print_statistics();
#endif
};

#endif // SHARE_OPTO_PHASEX_HPP