1 /*
   2  * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 #ifndef SHARE_VM_OPTO_SUPERWORD_HPP
  25 #define SHARE_VM_OPTO_SUPERWORD_HPP
  26 
  27 #include "opto/loopnode.hpp"
  28 #include "opto/node.hpp"
  29 #include "opto/phaseX.hpp"
  30 #include "opto/vectornode.hpp"
  31 #include "utilities/growableArray.hpp"
  32 
  33 //
  34 //                  S U P E R W O R D   T R A N S F O R M
  35 //
  36 // SuperWords are short, fixed length vectors.
  37 //
  38 // Algorithm from:
  39 //
  40 // Exploiting SuperWord Level Parallelism with
  41 //   Multimedia Instruction Sets
  42 // by
  43 //   Samuel Larsen and Saman Amarasinghe
  44 //   MIT Laboratory for Computer Science
  45 // date
  46 //   May 2000
  47 // published in
  48 //   ACM SIGPLAN Notices
  49 //   Proceedings of ACM PLDI '00,  Volume 35 Issue 5
  50 //
  51 // Definition 3.1 A Pack is an n-tuple, <s1, ...,sn>, where
  52 // s1,...,sn are independent isomorphic statements in a basic
  53 // block.
  54 //
  55 // Definition 3.2 A PackSet is a set of Packs.
  56 //
  57 // Definition 3.3 A Pair is a Pack of size two, where the
  58 // first statement is considered the left element, and the
  59 // second statement is considered the right element.
  60 
  61 class SWPointer;
  62 class OrderedPair;
  63 
  64 // ========================= Dependence Graph =====================
  65 
  66 class DepMem;
  67 
  68 //------------------------------DepEdge---------------------------
  69 // An edge in the dependence graph.  The edges incident to a dependence
  70 // node are threaded through _next_in for incoming edges and _next_out
  71 // for outgoing edges.
class DepEdge : public ResourceObj {
 protected:
  DepMem* _pred;       // Source node of this dependence edge
  DepMem* _succ;       // Destination node of this dependence edge
  DepEdge* _next_in;   // list of in edges, null terminated
  DepEdge* _next_out;  // list of out edges, null terminated

 public:
  // An edge is linked onto both endpoints' edge lists at construction:
  // next_in threads the successor's incoming list, next_out the
  // predecessor's outgoing list.
  DepEdge(DepMem* pred, DepMem* succ, DepEdge* next_in, DepEdge* next_out) :
    _pred(pred), _succ(succ), _next_in(next_in), _next_out(next_out) {}

  DepEdge* next_in()  { return _next_in; }   // Next edge sharing this edge's successor
  DepEdge* next_out() { return _next_out; }  // Next edge sharing this edge's predecessor
  DepMem*  pred()     { return _pred; }
  DepMem*  succ()     { return _succ; }

  void print();  // Debug printing (defined in superword.cpp)
};
  90 
  91 //------------------------------DepMem---------------------------
  92 // A node in the dependence graph.  _in_head starts the threaded list of
  93 // incoming edges, and _out_head starts the list of outgoing edges.
class DepMem : public ResourceObj {
 protected:
  Node*    _node;     // Corresponding ideal node (NULL for the graph's root/tail sentinels)
  DepEdge* _in_head;  // Head of list of in edges, null terminated
  DepEdge* _out_head; // Head of list of out edges, null terminated

 public:
  DepMem(Node* node) : _node(node), _in_head(NULL), _out_head(NULL) {}

  Node*    node()                { return _node;     }
  DepEdge* in_head()             { return _in_head;  }
  DepEdge* out_head()            { return _out_head; }
  void set_in_head(DepEdge* hd)  { _in_head = hd;    }  // Used by DepGraph::make_edge to push a new edge
  void set_out_head(DepEdge* hd) { _out_head = hd;   }  // Used by DepGraph::make_edge to push a new edge

  int in_cnt();  // Incoming edge count
  int out_cnt(); // Outgoing edge count

  void print();  // Debug printing (defined in superword.cpp)
};
 114 
 115 //------------------------------DepGraph---------------------------
// Dependence graph over the ideal nodes of one basic block.  Nodes are
// DepMem wrappers looked up by ideal-node index; edges are DepEdge.
// Two sentinel nodes, _root and _tail, bracket the graph.
class DepGraph VALUE_OBJ_CLASS_SPEC {
 protected:
  Arena* _arena;                 // Arena all graph nodes/edges are allocated in
  GrowableArray<DepMem*> _map;   // Map: ideal node _idx -> DepMem
  DepMem* _root;                 // Sentinel predecessor of everything
  DepMem* _tail;                 // Sentinel successor of everything

 public:
  DepGraph(Arena* a) : _arena(a), _map(a, 8,  0, NULL) {
    _root = new (_arena) DepMem(NULL);
    _tail = new (_arena) DepMem(NULL);
  }

  DepMem* root() { return _root; }
  DepMem* tail() { return _tail; }

  // Return dependence node corresponding to an ideal node
  // (precondition: make_node was called for this node).
  DepMem* dep(Node* node) { return _map.at(node->_idx); }

  // Make a new dependence graph node for an ideal node.
  DepMem* make_node(Node* node);

  // Make a new dependence graph edge dprec->dsucc
  DepEdge* make_edge(DepMem* dpred, DepMem* dsucc);

  // Convenience overloads that map ideal nodes through dep() first.
  DepEdge* make_edge(Node* pred,   Node* succ)   { return make_edge(dep(pred), dep(succ)); }
  DepEdge* make_edge(DepMem* pred, Node* succ)   { return make_edge(pred,      dep(succ)); }
  DepEdge* make_edge(Node* pred,   DepMem* succ) { return make_edge(dep(pred), succ);      }

  // Drop all node mappings (the arena-allocated DepMems are abandoned;
  // _root/_tail sentinels remain valid).
  void init() { _map.clear(); } // initialize

  void print(Node* n)   { dep(n)->print(); }
  void print(DepMem* d) { d->print(); }
};
 150 
 151 //------------------------------DepPreds---------------------------
 152 // Iterator over predecessors in the dependence graph and
 153 // non-memory-graph inputs of ideal nodes.
class DepPreds : public StackObj {
private:
  Node*    _n;                    // Node whose predecessors are iterated
  int      _next_idx, _end_idx;   // Cursor/limit over _n's non-memory inputs
  DepEdge* _dep_next;             // Cursor over dependence-graph in-edges
  Node*    _current;              // Predecessor returned by current()
  bool     _done;                 // True once both sources are exhausted

public:
  DepPreds(Node* n, DepGraph& dg);
  Node* current() { return _current; }
  bool  done()    { return _done; }
  void  next();   // Advance to the next predecessor (defined in superword.cpp)
};
 168 
 169 //------------------------------DepSuccs---------------------------
 170 // Iterator over successors in the dependence graph and
 171 // non-memory-graph outputs of ideal nodes.
class DepSuccs : public StackObj {
private:
  Node*    _n;                    // Node whose successors are iterated
  int      _next_idx, _end_idx;   // Cursor/limit over _n's non-memory outputs
  DepEdge* _dep_next;             // Cursor over dependence-graph out-edges
  Node*    _current;              // Successor returned by current()
  bool     _done;                 // True once both sources are exhausted

public:
  DepSuccs(Node* n, DepGraph& dg);
  Node* current() { return _current; }
  bool  done()    { return _done; }
  void  next();   // Advance to the next successor (defined in superword.cpp)
};
 186 
 187 
 188 // ========================= SuperWord =====================
 189 
 190 // -----------------------------SWNodeInfo---------------------------------
 191 // Per node info needed by SuperWord
class SWNodeInfo VALUE_OBJ_CLASS_SPEC {
 public:
  int         _alignment; // memory alignment for a node (-1 = top_align, i.e. unknown)
  int         _depth;     // Max expression (DAG) depth from block start
  const Type* _velt_type; // vector element type
  Node_List*  _my_pack;   // pack containing this node, NULL if not packed

  SWNodeInfo() : _alignment(-1), _depth(0), _velt_type(NULL), _my_pack(NULL) {}
  // Default-constructed value used to grow SuperWord::_node_info.
  static const SWNodeInfo initial;
};
 202 
 203 // JVMCI: OrderedPair is moved up to deal with compilation issues on Windows
 204 //------------------------------OrderedPair---------------------------
 205 // Ordered pair of Node*.
 206 class OrderedPair VALUE_OBJ_CLASS_SPEC {
 207  protected:
 208   Node* _p1;
 209   Node* _p2;
 210  public:
 211   OrderedPair() : _p1(NULL), _p2(NULL) {}
 212   OrderedPair(Node* p1, Node* p2) {
 213     if (p1->_idx < p2->_idx) {
 214       _p1 = p1; _p2 = p2;
 215     } else {
 216       _p1 = p2; _p2 = p1;
 217     }
 218   }
 219 
 220   bool operator==(const OrderedPair &rhs) {
 221     return _p1 == rhs._p1 && _p2 == rhs._p2;
 222   }
 223   void print() { tty->print("  (%d, %d)", _p1->_idx, _p2->_idx); }
 224 
 225   static const OrderedPair initial;
 226 };
 227 
 228 // -----------------------------SuperWord---------------------------------
 229 // Transforms scalar operations into packed (superword) operations.
 230 class SuperWord : public ResourceObj {
 231  friend class SWPointer;
 232  private:
 233   PhaseIdealLoop* _phase;
 234   Arena*          _arena;
 235   PhaseIterGVN   &_igvn;
 236 
 237   enum consts { top_align = -1, bottom_align = -666 };
 238 
 239   GrowableArray<Node_List*> _packset;    // Packs for the current block
 240 
 241   GrowableArray<int> _bb_idx;            // Map from Node _idx to index within block
 242 
 243   GrowableArray<Node*> _block;           // Nodes in current block
 244   GrowableArray<Node*> _data_entry;      // Nodes with all inputs from outside
 245   GrowableArray<Node*> _mem_slice_head;  // Memory slice head nodes
 246   GrowableArray<Node*> _mem_slice_tail;  // Memory slice tail nodes
 247   GrowableArray<Node*> _iteration_first; // nodes in the generation that has deps from phi
 248   GrowableArray<Node*> _iteration_last;  // nodes in the generation that has deps to   phi
 249   GrowableArray<SWNodeInfo> _node_info;  // Info needed per node
 250   CloneMap&                 _clone_map;  // map of nodes created in cloning
 251 
 252   MemNode* _align_to_ref;                // Memory reference that pre-loop will align to
 253 
 254   GrowableArray<OrderedPair> _disjoint_ptrs; // runtime disambiguated pointer pairs
 255 
 256   DepGraph _dg; // Dependence graph
 257 
 258   // Scratch pads
 259   VectorSet    _visited;       // Visited set
 260   VectorSet    _post_visited;  // Post-visited set
 261   Node_Stack   _n_idx_list;    // List of (node,index) pairs
 262   GrowableArray<Node*> _nlist; // List of nodes
 263   GrowableArray<Node*> _stk;   // Stack of nodes
 264 
 265  public:
 266   SuperWord(PhaseIdealLoop* phase);
 267 
 268   void transform_loop(IdealLoopTree* lpt, bool do_optimization);
 269 
 270   void unrolling_analysis(int &local_loop_unroll_factor);
 271 
 272   // Accessors for SWPointer
 273   PhaseIdealLoop* phase()          { return _phase; }
 274   IdealLoopTree* lpt()             { return _lpt; }
 275   PhiNode* iv()                    { return _iv; }
 276 
 277   bool early_return()              { return _early_return; }
 278 
 279 #ifndef PRODUCT
 280   bool     is_debug()              { return _vector_loop_debug > 0; }
 281   bool     is_trace_alignment()    { return (_vector_loop_debug & 2) > 0; }
 282   bool     is_trace_mem_slice()    { return (_vector_loop_debug & 4) > 0; }
 283   bool     is_trace_loop()         { return (_vector_loop_debug & 8) > 0; }
 284   bool     is_trace_adjacent()     { return (_vector_loop_debug & 16) > 0; }
 285 #endif
 286   bool     do_vector_loop()        { return _do_vector_loop; }
 287  private:
 288   IdealLoopTree* _lpt;             // Current loop tree node
 289   LoopNode*      _lp;              // Current LoopNode
 290   Node*          _bb;              // Current basic block
 291   PhiNode*       _iv;              // Induction var
 292   bool           _race_possible;   // In cases where SDMU is true
 293   bool           _early_return;    // True if we do not initialize
 294   bool           _do_vector_loop;  // whether to do vectorization/simd style
 295   int            _num_work_vecs;   // Number of non memory vector operations
 296   int            _num_reductions;  // Number of reduction expressions applied
 297   int            _ii_first;        // generation with direct deps from mem phi
 298   int            _ii_last;         // generation with direct deps to   mem phi
 299   GrowableArray<int> _ii_order;
 300 #ifndef PRODUCT
 301   uintx          _vector_loop_debug; // provide more printing in debug mode
 302 #endif
 303 
 304   // Accessors
 305   Arena* arena()                   { return _arena; }
 306 
 307   Node* bb()                       { return _bb; }
 308   void  set_bb(Node* bb)           { _bb = bb; }
 309 
 310   void set_lpt(IdealLoopTree* lpt) { _lpt = lpt; }
 311 
 312   LoopNode* lp()                   { return _lp; }
 313   void      set_lp(LoopNode* lp)   { _lp = lp;
 314                                      _iv = lp->as_CountedLoop()->phi()->as_Phi(); }
 315   int      iv_stride()             { return lp()->as_CountedLoop()->stride_con(); }
 316 
 317   int vector_width(Node* n) {
 318     BasicType bt = velt_basic_type(n);
 319     return MIN2(ABS(iv_stride()), Matcher::max_vector_size(bt));
 320   }
 321   int vector_width_in_bytes(Node* n) {
 322     BasicType bt = velt_basic_type(n);
 323     return vector_width(n)*type2aelembytes(bt);
 324   }
 325   MemNode* align_to_ref()            { return _align_to_ref; }
 326   void  set_align_to_ref(MemNode* m) { _align_to_ref = m; }
 327 
 328   Node* ctrl(Node* n) const { return _phase->has_ctrl(n) ? _phase->get_ctrl(n) : n; }
 329 
 330   // block accessors
 331   bool in_bb(Node* n)      { return n != NULL && n->outcnt() > 0 && ctrl(n) == _bb; }
 332   int  bb_idx(Node* n)     { assert(in_bb(n), "must be"); return _bb_idx.at(n->_idx); }
 333   void set_bb_idx(Node* n, int i) { _bb_idx.at_put_grow(n->_idx, i); }
 334 
 335   // visited set accessors
 336   void visited_clear()           { _visited.Clear(); }
 337   void visited_set(Node* n)      { return _visited.set(bb_idx(n)); }
 338   int visited_test(Node* n)      { return _visited.test(bb_idx(n)); }
 339   int visited_test_set(Node* n)  { return _visited.test_set(bb_idx(n)); }
 340   void post_visited_clear()      { _post_visited.Clear(); }
 341   void post_visited_set(Node* n) { return _post_visited.set(bb_idx(n)); }
 342   int post_visited_test(Node* n) { return _post_visited.test(bb_idx(n)); }
 343 
 344   // Ensure node_info contains element "i"
 345   void grow_node_info(int i) { if (i >= _node_info.length()) _node_info.at_put_grow(i, SWNodeInfo::initial); }
 346 
 347   // memory alignment for a node
 348   int alignment(Node* n)                     { return _node_info.adr_at(bb_idx(n))->_alignment; }
 349   void set_alignment(Node* n, int a)         { int i = bb_idx(n); grow_node_info(i); _node_info.adr_at(i)->_alignment = a; }
 350 
 351   // Max expression (DAG) depth from beginning of the block for each node
 352   int depth(Node* n)                         { return _node_info.adr_at(bb_idx(n))->_depth; }
 353   void set_depth(Node* n, int d)             { int i = bb_idx(n); grow_node_info(i); _node_info.adr_at(i)->_depth = d; }
 354 
 355   // vector element type
 356   const Type* velt_type(Node* n)             { return _node_info.adr_at(bb_idx(n))->_velt_type; }
 357   BasicType velt_basic_type(Node* n)         { return velt_type(n)->array_element_basic_type(); }
 358   void set_velt_type(Node* n, const Type* t) { int i = bb_idx(n); grow_node_info(i); _node_info.adr_at(i)->_velt_type = t; }
 359   bool same_velt_type(Node* n1, Node* n2);
 360 
 361   // my_pack
 362   Node_List* my_pack(Node* n)                { return !in_bb(n) ? NULL : _node_info.adr_at(bb_idx(n))->_my_pack; }
 363   void set_my_pack(Node* n, Node_List* p)    { int i = bb_idx(n); grow_node_info(i); _node_info.adr_at(i)->_my_pack = p; }
 364 
 365   // CloneMap utilities
 366   bool same_origin_idx(Node* a, Node* b) const;
 367   bool same_generation(Node* a, Node* b) const;
 368 
 369   // methods
 370 
 371   // Extract the superword level parallelism
 372   void SLP_extract();
 373   // Find the adjacent memory references and create pack pairs for them.
 374   void find_adjacent_refs();
 375   // Tracing support
 376   #ifndef PRODUCT
 377   void find_adjacent_refs_trace_1(Node* best_align_to_mem_ref, int best_iv_adjustment);
 378   #endif
 379   // Find a memory reference to align the loop induction variable to.
 380   MemNode* find_align_to_ref(Node_List &memops);
 381   // Calculate loop's iv adjustment for this memory ops.
 382   int get_iv_adjustment(MemNode* mem);
 383   // Can the preloop align the reference to position zero in the vector?
 384   bool ref_is_alignable(SWPointer& p);
 385   // rebuild the graph so all loads in different iterations of cloned loop become dependant on phi node (in _do_vector_loop only)
 386   bool hoist_loads_in_graph();
 387   // Test whether MemNode::Memory dependency to the same load but in the first iteration of this loop is coming from memory phi
 388   // Return false if failed
 389   Node* find_phi_for_mem_dep(LoadNode* ld);
 390   // Return same node but from the first generation. Return 0, if not found
 391   Node* first_node(Node* nd);
 392   // Return same node as this but from the last generation. Return 0, if not found
 393   Node* last_node(Node* n);
 394   // Mark nodes belonging to first and last generation
 395   // returns first generation index or -1 if vectorization/simd is impossible
 396   int mark_generations();
 397   // swapping inputs of commutative instruction (Add or Mul)
 398   bool fix_commutative_inputs(Node* gold, Node* fix);
 399   // make packs forcefully (in _do_vector_loop only)
 400   bool pack_parallel();
 401   // Construct dependency graph.
 402   void dependence_graph();
 403   // Return a memory slice (node list) in predecessor order starting at "start"
 404   void mem_slice_preds(Node* start, Node* stop, GrowableArray<Node*> &preds);
 405   // Can s1 and s2 be in a pack with s1 immediately preceding s2 and  s1 aligned at "align"
 406   bool stmts_can_pack(Node* s1, Node* s2, int align);
 407   // Does s exist in a pack at position pos?
 408   bool exists_at(Node* s, uint pos);
 409   // Is s1 immediately before s2 in memory?
 410   bool are_adjacent_refs(Node* s1, Node* s2);
 411   // Are s1 and s2 similar?
 412   bool isomorphic(Node* s1, Node* s2);
 413   // Is there no data path from s1 to s2 or s2 to s1?
 414   bool independent(Node* s1, Node* s2);
 415   // Is there a data path between s1 and s2 and both are reductions?
 416   bool reduction(Node* s1, Node* s2);
 417   // Helper for independent
 418   bool independent_path(Node* shallow, Node* deep, uint dp=0);
 419   void set_alignment(Node* s1, Node* s2, int align);
 420   int data_size(Node* s);
 421   // Extend packset by following use->def and def->use links from pack members.
 422   void extend_packlist();
 423   // Extend the packset by visiting operand definitions of nodes in pack p
 424   bool follow_use_defs(Node_List* p);
 425   // Extend the packset by visiting uses of nodes in pack p
 426   bool follow_def_uses(Node_List* p);
 427   // For extended packsets, ordinally arrange uses packset by major component
 428   void order_def_uses(Node_List* p);
 429   // Estimate the savings from executing s1 and s2 as a pack
 430   int est_savings(Node* s1, Node* s2);
 431   int adjacent_profit(Node* s1, Node* s2);
 432   int pack_cost(int ct);
 433   int unpack_cost(int ct);
 434   // Combine packs A and B with A.last == B.first into A.first..,A.last,B.second,..B.last
 435   void combine_packs();
 436   // Construct the map from nodes to packs.
 437   void construct_my_pack_map();
 438   // Remove packs that are not implemented or not profitable.
 439   void filter_packs();
 440   // Adjust the memory graph for the packed operations
 441   void schedule();
 442   // Remove "current" from its current position in the memory graph and insert
 443   // it after the appropriate insert points (lip or uip);
 444   void remove_and_insert(MemNode *current, MemNode *prev, MemNode *lip, Node *uip, Unique_Node_List &schd_before);
 445   // Within a store pack, schedule stores together by moving out the sandwiched memory ops according
 446   // to dependence info; and within a load pack, move loads down to the last executed load.
 447   void co_locate_pack(Node_List* p);
 448   // Convert packs into vector node operations
 449   void output();
 450   // Create a vector operand for the nodes in pack p for operand: in(opd_idx)
 451   Node* vector_opd(Node_List* p, int opd_idx);
 452   // Can code be generated for pack p?
 453   bool implemented(Node_List* p);
 454   // For pack p, are all operands and all uses (with in the block) vector?
 455   bool profitable(Node_List* p);
 456   // If a use of pack p is not a vector use, then replace the use with an extract operation.
 457   void insert_extracts(Node_List* p);
 458   // Is use->in(u_idx) a vector use?
 459   bool is_vector_use(Node* use, int u_idx);
 460   // Construct reverse postorder list of block members
 461   bool construct_bb();
 462   // Initialize per node info
 463   void initialize_bb();
 464   // Insert n into block after pos
 465   void bb_insert_after(Node* n, int pos);
 466   // Compute max depth for expressions from beginning of block
 467   void compute_max_depth();
 468   // Compute necessary vector element type for expressions
 469   void compute_vector_element_type();
 470   // Are s1 and s2 in a pack pair and ordered as s1,s2?
 471   bool in_packset(Node* s1, Node* s2);
 472   // Is s in pack p?
 473   Node_List* in_pack(Node* s, Node_List* p);
 474   // Remove the pack at position pos in the packset
 475   void remove_pack_at(int pos);
 476   // Return the node executed first in pack p.
 477   Node* executed_first(Node_List* p);
 478   // Return the node executed last in pack p.
 479   Node* executed_last(Node_List* p);
 480   static LoadNode::ControlDependency control_dependency(Node_List* p);
 481   // Alignment within a vector memory reference
 482   int memory_alignment(MemNode* s, int iv_adjust);
 483   // (Start, end] half-open range defining which operands are vector
 484   void vector_opd_range(Node* n, uint* start, uint* end);
 485   // Smallest type containing range of values
 486   const Type* container_type(Node* n);
 487   // Adjust pre-loop limit so that in main loop, a load/store reference
 488   // to align_to_ref will be a position zero in the vector.
 489   void align_initial_loop_index(MemNode* align_to_ref);
 490   // Find pre loop end from main loop.  Returns null if none.
 491   CountedLoopEndNode* get_pre_loop_end(CountedLoopNode *cl);
 492   // Is the use of d1 in u1 at the same operand position as d2 in u2?
 493   bool opnd_positions_match(Node* d1, Node* u1, Node* d2, Node* u2);
 494   void init();
 495   // clean up some basic structures - used if the ideal graph was rebuilt
 496   void restart();
 497 
 498   // print methods
 499   void print_packset();
 500   void print_pack(Node_List* p);
 501   void print_bb();
 502   void print_stmt(Node* s);
 503   char* blank(uint depth);
 504 
 505   void packset_sort(int n);
 506 };
 507 
 508 
 509 
 510 //------------------------------SWPointer---------------------------
 511 // Information about an address for dependence checking and vector alignment
 512 class SWPointer VALUE_OBJ_CLASS_SPEC {
 513  protected:
 514   MemNode*   _mem;           // My memory reference node
 515   SuperWord* _slp;           // SuperWord class
 516 
 517   Node* _base;               // NULL if unsafe nonheap reference
 518   Node* _adr;                // address pointer
 519   jint  _scale;              // multiplier for iv (in bytes), 0 if no loop iv
 520   jint  _offset;             // constant offset (in bytes)
 521   Node* _invar;              // invariant offset (in bytes), NULL if none
 522   bool  _negate_invar;       // if true then use: (0 - _invar)
 523   Node_Stack* _nstack;       // stack used to record a swpointer trace of variants
 524   bool        _analyze_only; // Used in loop unrolling only for swpointer trace
 525   uint        _stack_idx;    // Used in loop unrolling only for swpointer trace
 526 
 527   PhaseIdealLoop* phase() { return _slp->phase(); }
 528   IdealLoopTree*  lpt()   { return _slp->lpt(); }
 529   PhiNode*        iv()    { return _slp->iv();  } // Induction var
 530 
 531   bool invariant(Node* n);
 532 
 533   // Match: k*iv + offset
 534   bool scaled_iv_plus_offset(Node* n);
 535   // Match: k*iv where k is a constant that's not zero
 536   bool scaled_iv(Node* n);
 537   // Match: offset is (k [+/- invariant])
 538   bool offset_plus_k(Node* n, bool negate = false);
 539 
 540  public:
 541   enum CMP {
 542     Less          = 1,
 543     Greater       = 2,
 544     Equal         = 4,
 545     NotEqual      = (Less | Greater),
 546     NotComparable = (Less | Greater | Equal)
 547   };
 548 
 549   SWPointer(MemNode* mem, SuperWord* slp, Node_Stack *nstack, bool analyze_only);
 550   // Following is used to create a temporary object during
 551   // the pattern match of an address expression.
 552   SWPointer(SWPointer* p);
 553 
 554   bool valid()  { return _adr != NULL; }
 555   bool has_iv() { return _scale != 0; }
 556 
 557   Node* base()             { return _base; }
 558   Node* adr()              { return _adr; }
 559   MemNode* mem()           { return _mem; }
 560   int   scale_in_bytes()   { return _scale; }
 561   Node* invar()            { return _invar; }
 562   bool  negate_invar()     { return _negate_invar; }
 563   int   offset_in_bytes()  { return _offset; }
 564   int   memory_size()      { return _mem->memory_size(); }
 565   Node_Stack* node_stack() { return _nstack; }
 566 
 567   // Comparable?
 568   int cmp(SWPointer& q) {
 569     if (valid() && q.valid() &&
 570         (_adr == q._adr || _base == _adr && q._base == q._adr) &&
 571         _scale == q._scale   &&
 572         _invar == q._invar   &&
 573         _negate_invar == q._negate_invar) {
 574       bool overlap = q._offset <   _offset +   memory_size() &&
 575                        _offset < q._offset + q.memory_size();
 576       return overlap ? Equal : (_offset < q._offset ? Less : Greater);
 577     } else {
 578       return NotComparable;
 579     }
 580   }
 581 
 582   bool not_equal(SWPointer& q)    { return not_equal(cmp(q)); }
 583   bool equal(SWPointer& q)        { return equal(cmp(q)); }
 584   bool comparable(SWPointer& q)   { return comparable(cmp(q)); }
 585   static bool not_equal(int cmp)  { return cmp <= NotEqual; }
 586   static bool equal(int cmp)      { return cmp == Equal; }
 587   static bool comparable(int cmp) { return cmp < NotComparable; }
 588 
 589   void print();
 590 
 591 #ifndef PRODUCT
 592   class Tracer {
 593     friend class SuperWord;
 594     friend class SWPointer;
 595     SuperWord*   _slp;
 596     static int   _depth;
 597     int _depth_save;
 598     void print_depth();
 599     int  depth() const    { return _depth; }
 600     void set_depth(int d) { _depth = d; }
 601     void inc_depth()      { _depth++;}
 602     void dec_depth()      { if (_depth > 0) _depth--;}
 603     void store_depth()    {_depth_save = _depth;}
 604     void restore_depth()  {_depth = _depth_save;}
 605 
 606     class Depth {
 607       friend class Tracer;
 608       friend class SWPointer;
 609       friend class SuperWord;
 610       Depth()  { ++_depth; }
 611       Depth(int x)  { _depth = 0; }
 612       ~Depth() { if (_depth > 0) --_depth;}
 613     };
 614     Tracer (SuperWord* slp) : _slp(slp) {}
 615 
 616     // tracing functions
 617     void ctor_1(Node* mem);
 618     void ctor_2(Node* adr);
 619     void ctor_3(Node* adr, int i);
 620     void ctor_4(Node* adr, int i);
 621     void ctor_5(Node* adr, Node* base,  int i);
 622     void ctor_6(Node* mem);
 623 
 624     void invariant_1(Node *n, Node *n_c);
 625 
 626     void scaled_iv_plus_offset_1(Node* n);
 627     void scaled_iv_plus_offset_2(Node* n);
 628     void scaled_iv_plus_offset_3(Node* n);
 629     void scaled_iv_plus_offset_4(Node* n);
 630     void scaled_iv_plus_offset_5(Node* n);
 631     void scaled_iv_plus_offset_6(Node* n);
 632     void scaled_iv_plus_offset_7(Node* n);
 633     void scaled_iv_plus_offset_8(Node* n);
 634 
 635     void scaled_iv_1(Node* n);
 636     void scaled_iv_2(Node* n, int scale);
 637     void scaled_iv_3(Node* n, int scale);
 638     void scaled_iv_4(Node* n, int scale);
 639     void scaled_iv_5(Node* n, int scale);
 640     void scaled_iv_6(Node* n, int scale);
 641     void scaled_iv_7(Node* n);
 642     void scaled_iv_8(Node* n, SWPointer* tmp);
 643     void scaled_iv_9(Node* n, int _scale, int _offset, int mult);
 644     void scaled_iv_10(Node* n);
 645 
 646     void offset_plus_k_1(Node* n);
 647     void offset_plus_k_2(Node* n, int _offset);
 648     void offset_plus_k_3(Node* n, int _offset);
 649     void offset_plus_k_4(Node* n);
 650     void offset_plus_k_5(Node* n, Node* _invar);
 651     void offset_plus_k_6(Node* n, Node* _invar, bool _negate_invar, int _offset);
 652     void offset_plus_k_7(Node* n, Node* _invar, bool _negate_invar, int _offset);
 653     void offset_plus_k_8(Node* n, Node* _invar, bool _negate_invar, int _offset);
 654     void offset_plus_k_9(Node* n, Node* _invar, bool _negate_invar, int _offset);
 655     void offset_plus_k_10(Node* n, Node* _invar, bool _negate_invar, int _offset);
 656     void offset_plus_k_11(Node* n);
 657 
 658   } _tracer;//TRacer;
 659 #endif
 660 };
 661 
 662 #endif // SHARE_VM_OPTO_SUPERWORD_HPP