/*
 * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#ifndef SHARE_VM_OPTO_SUPERWORD_HPP
#define SHARE_VM_OPTO_SUPERWORD_HPP

#include "opto/loopnode.hpp"
#include "opto/node.hpp"
#include "opto/phaseX.hpp"
#include "opto/vectornode.hpp"
#include "utilities/growableArray.hpp"
#include "libadt/dict.hpp"

//
//                  S U P E R W O R D   T R A N S F O R M
//
// SuperWords are short, fixed length vectors.
//
// Algorithm from:
//
// Exploiting SuperWord Level Parallelism with
//   Multimedia Instruction Sets
// by
//   Samuel Larsen and Saman Amarasinghe
//   MIT Laboratory for Computer Science
// date
//   May 2000
// published in
//   ACM SIGPLAN Notices
//   Proceedings of ACM PLDI '00, Volume 35 Issue 5
//
// Definition 3.1 A Pack is an n-tuple, <s1, ...,sn>, where
// s1,...,sn are independent isomorphic statements in a basic
// block.
//
// Definition 3.2 A PackSet is a set of Packs.
//
// Definition 3.3 A Pair is a Pack of size two, where the
// first statement is considered the left element, and the
// second statement is considered the right element.
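//
// For example (illustrative only), in a loop body containing
//
//   a[i]   = b[i]   + c[i];
//   a[i+1] = b[i+1] + c[i+1];
//
// the two statements are isomorphic (same operation shapes) and independent
// (neither consumes the other's result), so they can form a Pair; packing may
// later replace them with a single vector add and a single vector store.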

class SWPointer;
class OrderedPair;

// ========================= Dependence Graph =====================

class DepMem;

//------------------------------DepEdge---------------------------
// An edge in the dependence graph.  The edges incident to a dependence
// node are threaded through _next_in for incoming edges and _next_out
// for outgoing edges.
class DepEdge : public ResourceObj {
 protected:
  DepMem* _pred;
  DepMem* _succ;
  DepEdge* _next_in;   // list of in edges, null terminated
  DepEdge* _next_out;  // list of out edges, null terminated

 public:
  DepEdge(DepMem* pred, DepMem* succ, DepEdge* next_in, DepEdge* next_out) :
    _pred(pred), _succ(succ), _next_in(next_in), _next_out(next_out) {}

  DepEdge* next_in()  { return _next_in; }
  DepEdge* next_out() { return _next_out; }
  DepMem*  pred()     { return _pred; }
  DepMem*  succ()     { return _succ; }

  void print();
};

//------------------------------DepMem---------------------------
// A node in the dependence graph.  _in_head starts the threaded list of
// incoming edges, and _out_head starts the list of outgoing edges.
class DepMem : public ResourceObj {
 protected:
  Node*    _node;     // Corresponding ideal node
  DepEdge* _in_head;  // Head of list of in edges, null terminated
  DepEdge* _out_head; // Head of list of out edges, null terminated

 public:
  DepMem(Node* node) : _node(node), _in_head(NULL), _out_head(NULL) {}

  Node*    node()                { return _node;     }
  DepEdge* in_head()             { return _in_head;  }
  DepEdge* out_head()            { return _out_head; }
  void set_in_head(DepEdge* hd)  { _in_head = hd;    }
  void set_out_head(DepEdge* hd) { _out_head = hd;   }

  int in_cnt();  // Incoming edge count
  int out_cnt(); // Outgoing edge count

  void print();
};

//------------------------------DepGraph---------------------------
class DepGraph {
 protected:
  Arena* _arena;
  GrowableArray<DepMem*> _map;
  DepMem* _root;
  DepMem* _tail;

 public:
  DepGraph(Arena* a) : _arena(a), _map(a, 8,  0, NULL) {
    _root = new (_arena) DepMem(NULL);
    _tail = new (_arena) DepMem(NULL);
  }

  DepMem* root() { return _root; }
  DepMem* tail() { return _tail; }

  // Return dependence node corresponding to an ideal node
  DepMem* dep(Node* node) { return _map.at(node->_idx); }

  // Make a new dependence graph node for an ideal node.
  DepMem* make_node(Node* node);

  // Make a new dependence graph edge dpred->dsucc
  DepEdge* make_edge(DepMem* dpred, DepMem* dsucc);

  DepEdge* make_edge(Node* pred,   Node* succ)   { return make_edge(dep(pred), dep(succ)); }
  DepEdge* make_edge(DepMem* pred, Node* succ)   { return make_edge(pred,      dep(succ)); }
  DepEdge* make_edge(Node* pred,   DepMem* succ) { return make_edge(dep(pred), succ);      }

  void init() { _map.clear(); } // initialize

  void print(Node* n)   { dep(n)->print(); }
  void print(DepMem* d) { d->print(); }
};
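
// Usage sketch (illustrative only; the real construction is done by
// SuperWord::dependence_graph()): each ideal node in the block gets a DepMem
// via make_node(), and memory dependences become edges, e.g.
//   dg.make_node(store);              // one DepMem per ideal node
//   dg.make_edge(prior_store, load);  // keep load after prior_store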

//------------------------------DepPreds---------------------------
// Iterator over predecessors in the dependence graph and
// non-memory-graph inputs of ideal nodes.
class DepPreds : public StackObj {
private:
  Node*    _n;
  int      _next_idx, _end_idx;
  DepEdge* _dep_next;
  Node*    _current;
  bool     _done;

public:
  DepPreds(Node* n, DepGraph& dg);
  Node* current() { return _current; }
  bool  done()    { return _done; }
  void  next();
};

//------------------------------DepSuccs---------------------------
// Iterator over successors in the dependence graph and
// non-memory-graph outputs of ideal nodes.
class DepSuccs : public StackObj {
private:
  Node*    _n;
  int      _next_idx, _end_idx;
  DepEdge* _dep_next;
  Node*    _current;
  bool     _done;

public:
  DepSuccs(Node* n, DepGraph& dg);
  Node* current() { return _current; }
  bool  done()    { return _done; }
  void  next();
};
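
// Both iterators follow the same pattern (sketch, with dg the dependence
// graph built by SuperWord):
//   for (DepPreds preds(n, dg); !preds.done(); preds.next()) {
//     Node* pred = preds.current();
//     // ... pred is a dependence-graph or non-memory-graph input of n
//   }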


// ========================= SuperWord =====================

// -----------------------------SWNodeInfo---------------------------------
// Per node info needed by SuperWord
class SWNodeInfo {
 public:
  int         _alignment; // memory alignment for a node
  int         _depth;     // Max expression (DAG) depth from block start
  const Type* _velt_type; // vector element type
  Node_List*  _my_pack;   // pack containing this node

  SWNodeInfo() : _alignment(-1), _depth(0), _velt_type(NULL), _my_pack(NULL) {}
  static const SWNodeInfo initial;
};

class SuperWord;
class CMoveKit {
 friend class SuperWord;
 private:
  SuperWord* _sw;
  Dict* _dict;
  CMoveKit(Arena* a, SuperWord* sw) : _sw(sw)  {_dict = new Dict(cmpkey, hashkey, a);}
  void*     _2p(Node* key)        const  { return (void*)(intptr_t)key; } // conversion function to make gcc happy
  Dict*     dict()                const  { return _dict; }
  void map(Node* key, Node_List* val)    { assert(_dict->operator[](_2p(key)) == NULL, "key existed"); _dict->Insert(_2p(key), (void*)val); }
  void unmap(Node* key)                  { _dict->Delete(_2p(key)); }
  Node_List* pack(Node* key)      const  { return (Node_List*)_dict->operator[](_2p(key)); }
  Node* is_Bool_candidate(Node* nd) const; // if nd is the right candidate, return the corresponding CMove*;
  Node* is_CmpD_candidate(Node* nd) const; // otherwise return NULL
  Node_List* make_cmovevd_pack(Node_List* cmovd_pk);
  bool test_cmpd_pack(Node_List* cmpd_pk, Node_List* cmovd_pk);
};//class CMoveKit
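
// Usage sketch (illustrative only): during CMove vectorization the kit's
// dictionary associates a node with its pack, e.g.
//   _cmovev_kit.map(cmove, pk);                 // remember pk for cmove
//   Node_List* found = _cmovev_kit.pack(cmove); // look it up again later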

// JVMCI: OrderedPair is moved up to deal with compilation issues on Windows
//------------------------------OrderedPair---------------------------
// Ordered pair of Node*.
class OrderedPair {
 protected:
  Node* _p1;
  Node* _p2;
 public:
  OrderedPair() : _p1(NULL), _p2(NULL) {}
  OrderedPair(Node* p1, Node* p2) {
    if (p1->_idx < p2->_idx) {
      _p1 = p1; _p2 = p2;
    } else {
      _p1 = p2; _p2 = p1;
    }
  }

  bool operator==(const OrderedPair &rhs) {
    return _p1 == rhs._p1 && _p2 == rhs._p2;
  }
  void print() { tty->print("  (%d, %d)", _p1->_idx, _p2->_idx); }

  static const OrderedPair initial;
};

// -----------------------------SuperWord---------------------------------
// Transforms scalar operations into packed (superword) operations.
class SuperWord : public ResourceObj {
 friend class SWPointer;
 friend class CMoveKit;
 private:
  PhaseIdealLoop* _phase;
  Arena*          _arena;
  PhaseIterGVN   &_igvn;

  enum consts { top_align = -1, bottom_align = -666 };

  GrowableArray<Node_List*> _packset;    // Packs for the current block

  GrowableArray<int> _bb_idx;            // Map from Node _idx to index within block

  GrowableArray<Node*> _block;           // Nodes in current block
  GrowableArray<Node*> _post_block;      // Nodes in post loop block
  GrowableArray<Node*> _data_entry;      // Nodes with all inputs from outside
  GrowableArray<Node*> _mem_slice_head;  // Memory slice head nodes
  GrowableArray<Node*> _mem_slice_tail;  // Memory slice tail nodes
  GrowableArray<Node*> _iteration_first; // nodes in the generation that have deps from the phi
  GrowableArray<Node*> _iteration_last;  // nodes in the generation that have deps to   the phi
  GrowableArray<SWNodeInfo> _node_info;  // Info needed per node
  CloneMap&            _clone_map;       // map of nodes created in cloning
  CMoveKit             _cmovev_kit;      // support for vectorization of CMov
  MemNode* _align_to_ref;                // Memory reference that pre-loop will align to

  GrowableArray<OrderedPair> _disjoint_ptrs; // runtime disambiguated pointer pairs

  DepGraph _dg; // Dependence graph

  // Scratch pads
  VectorSet    _visited;       // Visited set
  VectorSet    _post_visited;  // Post-visited set
  Node_Stack   _n_idx_list;    // List of (node,index) pairs
  GrowableArray<Node*> _nlist; // List of nodes
  GrowableArray<Node*> _stk;   // Stack of nodes

 public:
  SuperWord(PhaseIdealLoop* phase);

  void transform_loop(IdealLoopTree* lpt, bool do_optimization);

  void unrolling_analysis(int &local_loop_unroll_factor);
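
  // Typical invocation from the loop optimizer (sketch; the exact call site
  // and flags live in the loop transformation code):
  //   SuperWord sw(phase);
  //   sw.transform_loop(lpt, true /*do_optimization*/);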

  // Accessors for SWPointer
  PhaseIdealLoop* phase()          { return _phase; }
  IdealLoopTree* lpt()             { return _lpt; }
  PhiNode* iv()                    { return _iv; }

  bool early_return()              { return _early_return; }

#ifndef PRODUCT
  bool     is_debug()              { return _vector_loop_debug > 0; }
  bool     is_trace_alignment()    { return (_vector_loop_debug & 2) > 0; }
  bool     is_trace_mem_slice()    { return (_vector_loop_debug & 4) > 0; }
  bool     is_trace_loop()         { return (_vector_loop_debug & 8) > 0; }
  bool     is_trace_adjacent()     { return (_vector_loop_debug & 16) > 0; }
  bool     is_trace_cmov()         { return (_vector_loop_debug & 32) > 0; }
  bool     is_trace_loop_reverse() { return (_vector_loop_debug & 64) > 0; }
#endif
  bool     do_vector_loop()        { return _do_vector_loop; }
  bool     do_reserve_copy()       { return _do_reserve_copy; }
 private:
  IdealLoopTree* _lpt;             // Current loop tree node
  LoopNode*      _lp;              // Current LoopNode
  Node*          _bb;              // Current basic block
  PhiNode*       _iv;              // Induction var
  bool           _race_possible;   // In cases where SDMU is true
  bool           _early_return;    // True if we do not initialize
  bool           _do_vector_loop;  // whether to do vectorization/simd style
  bool           _do_reserve_copy; // make a reserve copy of the graph (loop) before final modification in output
  int            _num_work_vecs;   // Number of non memory vector operations
  int            _num_reductions;  // Number of reduction expressions applied
  int            _ii_first;        // generation with direct deps from mem phi
  int            _ii_last;         // generation with direct deps to   mem phi
  GrowableArray<int> _ii_order;
#ifndef PRODUCT
  uintx          _vector_loop_debug; // provide more printing in debug mode
#endif

  // Accessors
  Arena* arena()                   { return _arena; }

  Node* bb()                       { return _bb; }
  void  set_bb(Node* bb)           { _bb = bb; }

  void set_lpt(IdealLoopTree* lpt) { _lpt = lpt; }

  LoopNode* lp()                   { return _lp; }
  void      set_lp(LoopNode* lp)   { _lp = lp;
                                     _iv = lp->as_CountedLoop()->phi()->as_Phi(); }
  int      iv_stride()             { return lp()->as_CountedLoop()->stride_con(); }

  int vector_width(Node* n) {
    BasicType bt = velt_basic_type(n);
    return MIN2(ABS(iv_stride()), Matcher::max_vector_size(bt));
  }
  int vector_width_in_bytes(Node* n) {
    BasicType bt = velt_basic_type(n);
    return vector_width(n)*type2aelembytes(bt);
  }
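  // For example (hypothetical numbers): with T_INT elements, an unrolled iv
  // stride of 8, and a platform where Matcher::max_vector_size(T_INT) is 4,
  // vector_width is MIN2(8, 4) = 4 and vector_width_in_bytes is 4 * 4 = 16.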
  MemNode* align_to_ref()            { return _align_to_ref; }
  void  set_align_to_ref(MemNode* m) { _align_to_ref = m; }

  Node* ctrl(Node* n) const { return _phase->has_ctrl(n) ? _phase->get_ctrl(n) : n; }

  // block accessors
  bool in_bb(Node* n)      { return n != NULL && n->outcnt() > 0 && ctrl(n) == _bb; }
  int  bb_idx(Node* n)     { assert(in_bb(n), "must be"); return _bb_idx.at(n->_idx); }
  void set_bb_idx(Node* n, int i) { _bb_idx.at_put_grow(n->_idx, i); }

  // visited set accessors
  void visited_clear()           { _visited.Clear(); }
  void visited_set(Node* n)      { return _visited.set(bb_idx(n)); }
  int visited_test(Node* n)      { return _visited.test(bb_idx(n)); }
  int visited_test_set(Node* n)  { return _visited.test_set(bb_idx(n)); }
  void post_visited_clear()      { _post_visited.Clear(); }
  void post_visited_set(Node* n) { return _post_visited.set(bb_idx(n)); }
  int post_visited_test(Node* n) { return _post_visited.test(bb_idx(n)); }

  // Ensure node_info contains element "i"
  void grow_node_info(int i) { if (i >= _node_info.length()) _node_info.at_put_grow(i, SWNodeInfo::initial); }

  // memory alignment for a node
  int alignment(Node* n)                     { return _node_info.adr_at(bb_idx(n))->_alignment; }
  void set_alignment(Node* n, int a)         { int i = bb_idx(n); grow_node_info(i); _node_info.adr_at(i)->_alignment = a; }

  // Max expression (DAG) depth from beginning of the block for each node
  int depth(Node* n)                         { return _node_info.adr_at(bb_idx(n))->_depth; }
  void set_depth(Node* n, int d)             { int i = bb_idx(n); grow_node_info(i); _node_info.adr_at(i)->_depth = d; }

  // vector element type
  const Type* velt_type(Node* n)             { return _node_info.adr_at(bb_idx(n))->_velt_type; }
  BasicType velt_basic_type(Node* n)         { return velt_type(n)->array_element_basic_type(); }
  void set_velt_type(Node* n, const Type* t) { int i = bb_idx(n); grow_node_info(i); _node_info.adr_at(i)->_velt_type = t; }
  bool same_velt_type(Node* n1, Node* n2);

  // my_pack
  Node_List* my_pack(Node* n)                 { return !in_bb(n) ? NULL : _node_info.adr_at(bb_idx(n))->_my_pack; }
  void set_my_pack(Node* n, Node_List* p)     { int i = bb_idx(n); grow_node_info(i); _node_info.adr_at(i)->_my_pack = p; }
  // is pack good for converting into one vector node replacing 12 nodes of Cmp, Bool, CMov
  bool is_cmov_pack(Node_List* p);
  bool is_cmov_pack_internal_node(Node_List* p, Node* nd) { return is_cmov_pack(p) && !nd->is_CMove(); }
  // For pack p, are all idx operands the same?
  bool same_inputs(Node_List* p, int idx);
  // CloneMap utilities
  bool same_origin_idx(Node* a, Node* b) const;
  bool same_generation(Node* a, Node* b) const;

  // methods

  // Extract the superword level parallelism
  void SLP_extract();
  // Find the adjacent memory references and create pack pairs for them.
  void find_adjacent_refs();
  // Tracing support
  #ifndef PRODUCT
  void find_adjacent_refs_trace_1(Node* best_align_to_mem_ref, int best_iv_adjustment);
  void print_loop(bool whole);
  #endif
  // Find a memory reference to align the loop induction variable to.
  MemNode* find_align_to_ref(Node_List &memops);
  // Calculate the loop's iv adjustment for this memory op.
  int get_iv_adjustment(MemNode* mem);
  // Can the preloop align the reference to position zero in the vector?
  bool ref_is_alignable(SWPointer& p);
  // rebuild the graph so all loads in different iterations of cloned loop become dependent on phi node (in _do_vector_loop only)
  bool hoist_loads_in_graph();
  // Test whether the MemNode::Memory dependency of the same load, but in the first iteration of this loop, comes from the memory phi.
  // Return NULL if it does not.
  Node* find_phi_for_mem_dep(LoadNode* ld);
  // Return the same node but from the first generation. Return NULL if not found.
  Node* first_node(Node* nd);
  // Return the same node as this but from the last generation. Return NULL if not found.
  Node* last_node(Node* n);
  // Mark nodes belonging to first and last generation
  // returns first generation index or -1 if vectorization/simd is impossible
  int mark_generations();
  // swapping inputs of commutative instruction (Add or Mul)
  bool fix_commutative_inputs(Node* gold, Node* fix);
  // make packs forcefully (in _do_vector_loop only)
  bool pack_parallel();
  // Construct dependency graph.
  void dependence_graph();
  // Return a memory slice (node list) in predecessor order starting at "start"
  void mem_slice_preds(Node* start, Node* stop, GrowableArray<Node*> &preds);
  // Can s1 and s2 be in a pack with s1 immediately preceding s2 and s1 aligned at "align"?
  bool stmts_can_pack(Node* s1, Node* s2, int align);
  // Does s exist in a pack at position pos?
  bool exists_at(Node* s, uint pos);
  // Is s1 immediately before s2 in memory?
  bool are_adjacent_refs(Node* s1, Node* s2);
  // Are s1 and s2 similar?
  bool isomorphic(Node* s1, Node* s2);
  // Is there no data path from s1 to s2 or s2 to s1?
  bool independent(Node* s1, Node* s2);
  // For a node pair (s1, s2) which is isomorphic and independent,
  // do s1 and s2 have similar input edges?
  bool have_similar_inputs(Node* s1, Node* s2);
  // Is there a data path between s1 and s2 and both are reductions?
  bool reduction(Node* s1, Node* s2);
  // Helper for independent
  bool independent_path(Node* shallow, Node* deep, uint dp=0);
  void set_alignment(Node* s1, Node* s2, int align);
  int data_size(Node* s);
  // Extend packset by following use->def and def->use links from pack members.
  void extend_packlist();
  // Extend the packset by visiting operand definitions of nodes in pack p
  bool follow_use_defs(Node_List* p);
  // Extend the packset by visiting uses of nodes in pack p
  bool follow_def_uses(Node_List* p);
  // For extended packsets, ordinally arrange uses packset by major component
  void order_def_uses(Node_List* p);
  // Estimate the savings from executing s1 and s2 as a pack
  int est_savings(Node* s1, Node* s2);
  int adjacent_profit(Node* s1, Node* s2);
  int pack_cost(int ct);
  int unpack_cost(int ct);
  // Combine packs A and B with A.last == B.first into A.first..,A.last,B.second,..B.last
  void combine_packs();
  // Construct the map from nodes to packs.
  void construct_my_pack_map();
  // Remove packs that are not implemented or not profitable.
  void filter_packs();
  // Merge CMoveD into new vector-nodes
  void merge_packs_to_cmovd();
  // Adjust the memory graph for the packed operations
  void schedule();
  // Remove "current" from its current position in the memory graph and insert
  // it after the appropriate insert points (lip or uip).
  void remove_and_insert(MemNode *current, MemNode *prev, MemNode *lip, Node *uip, Unique_Node_List &schd_before);
  // Within a store pack, schedule stores together by moving out the sandwiched memory ops according
  // to dependence info; and within a load pack, move loads down to the last executed load.
  void co_locate_pack(Node_List* p);
  // Convert packs into vector node operations
  void output();
  // Create a vector operand for the nodes in pack p for operand: in(opd_idx)
  Node* vector_opd(Node_List* p, int opd_idx);
  // Can code be generated for pack p?
  bool implemented(Node_List* p);
  // For pack p, are all operands and all uses (within the block) vector?
  bool profitable(Node_List* p);
  // If a use of pack p is not a vector use, then replace the use with an extract operation.
  void insert_extracts(Node_List* p);
  // Is use->in(u_idx) a vector use?
  bool is_vector_use(Node* use, int u_idx);
  // Construct reverse postorder list of block members
  bool construct_bb();
  // Initialize per node info
  void initialize_bb();
  // Insert n into block after pos
  void bb_insert_after(Node* n, int pos);
  // Compute max depth for expressions from beginning of block
  void compute_max_depth();
  // Compute necessary vector element type for expressions
  void compute_vector_element_type();
  // Are s1 and s2 in a pack pair and ordered as s1,s2?
  bool in_packset(Node* s1, Node* s2);
  // Is s in pack p?
  Node_List* in_pack(Node* s, Node_List* p);
  // Remove the pack at position pos in the packset
  void remove_pack_at(int pos);
  // Return the node executed first in pack p.
  Node* executed_first(Node_List* p);
  // Return the node executed last in pack p.
  Node* executed_last(Node_List* p);
  static LoadNode::ControlDependency control_dependency(Node_List* p);
  // Alignment within a vector memory reference
  int memory_alignment(MemNode* s, int iv_adjust);
  // (Start, end] half-open range defining which operands are vector
  void vector_opd_range(Node* n, uint* start, uint* end);
  // Smallest type containing range of values
  const Type* container_type(Node* n);
  // Adjust pre-loop limit so that in main loop, a load/store reference
  // to align_to_ref will be at position zero in the vector.
  void align_initial_loop_index(MemNode* align_to_ref);
  // Find pre loop end from main loop.  Returns NULL if none.
  CountedLoopEndNode* get_pre_loop_end(CountedLoopNode *cl);
  // Is the use of d1 in u1 at the same operand position as d2 in u2?
  bool opnd_positions_match(Node* d1, Node* u1, Node* d2, Node* u2);
  void init();
  // clean up some basic structures - used if the ideal graph was rebuilt
  void restart();

  // print methods
  void print_packset();
  void print_pack(Node_List* p);
  void print_bb();
  void print_stmt(Node* s);
  char* blank(uint depth);

  void packset_sort(int n);
};



//------------------------------SWPointer---------------------------
// Information about an address for dependence checking and vector alignment
class SWPointer {
 protected:
  MemNode*   _mem;           // My memory reference node
  SuperWord* _slp;           // SuperWord class

  Node* _base;               // NULL if unsafe nonheap reference
  Node* _adr;                // address pointer
  jint  _scale;              // multiplier for iv (in bytes), 0 if no loop iv
  jint  _offset;             // constant offset (in bytes)
  Node* _invar;              // invariant offset (in bytes), NULL if none
  bool  _negate_invar;       // if true then use: (0 - _invar)
  Node_Stack* _nstack;       // stack used to record a swpointer trace of variants
  bool        _analyze_only; // Used in loop unrolling only for swpointer trace
  uint        _stack_idx;    // Used in loop unrolling only for swpointer trace

  PhaseIdealLoop* phase() { return _slp->phase(); }
  IdealLoopTree*  lpt()   { return _slp->lpt(); }
  PhiNode*        iv()    { return _slp->iv();  } // Induction var

  bool invariant(Node* n);

  // Match: k*iv + offset
  bool scaled_iv_plus_offset(Node* n);
  // Match: k*iv where k is a constant that's not zero
  bool scaled_iv(Node* n);
  // Match: offset is (k [+/- invariant])
  bool offset_plus_k(Node* n, bool negate = false);
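
  // Example decomposition (hypothetical values): for a load of a[i + k + 3]
  // from an int array, where i is the loop iv and k is loop-invariant, a
  // successful match would roughly yield
  //   _scale  = 4 (bytes per T_INT element)
  //   _invar  = a node for the loop-invariant part (k scaled to bytes), _negate_invar = false
  //   _offset = 12 plus the platform's array base offset (constant part, in bytes)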

 public:
  enum CMP {
    Less          = 1,
    Greater       = 2,
    Equal         = 4,
    NotEqual      = (Less | Greater),
    NotComparable = (Less | Greater | Equal)
  };

  SWPointer(MemNode* mem, SuperWord* slp, Node_Stack *nstack, bool analyze_only);
  // Following is used to create a temporary object during
  // the pattern match of an address expression.
  SWPointer(SWPointer* p);

  bool valid()  { return _adr != NULL; }
  bool has_iv() { return _scale != 0; }

  Node* base()             { return _base; }
  Node* adr()              { return _adr; }
  MemNode* mem()           { return _mem; }
  int   scale_in_bytes()   { return _scale; }
  Node* invar()            { return _invar; }
  bool  negate_invar()     { return _negate_invar; }
  int   offset_in_bytes()  { return _offset; }
  int   memory_size()      { return _mem->memory_size(); }
  Node_Stack* node_stack() { return _nstack; }

  // Comparable?
  int cmp(SWPointer& q) {
    if (valid() && q.valid() &&
        (_adr == q._adr || (_base == _adr && q._base == q._adr)) &&
        _scale == q._scale   &&
        _invar == q._invar   &&
        _negate_invar == q._negate_invar) {
      bool overlap = q._offset <   _offset +   memory_size() &&
                       _offset < q._offset + q.memory_size();
      return overlap ? Equal : (_offset < q._offset ? Less : Greater);
    } else {
      return NotComparable;
    }
  }
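
  // For example (hypothetical values): two 4-byte references with the same
  // base, scale and invariant and with offsets 0 and 16 do not overlap, so
  // cmp() reports Less for the lower one; with offsets 0 and 2 the accesses
  // overlap and cmp() reports Equal; address expressions that cannot be
  // related as above are NotComparable.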

  bool not_equal(SWPointer& q)    { return not_equal(cmp(q)); }
  bool equal(SWPointer& q)        { return equal(cmp(q)); }
  bool comparable(SWPointer& q)   { return comparable(cmp(q)); }
  static bool not_equal(int cmp)  { return cmp <= NotEqual; }
  static bool equal(int cmp)      { return cmp == Equal; }
  static bool comparable(int cmp) { return cmp < NotComparable; }

  void print();

#ifndef PRODUCT
  class Tracer {
    friend class SuperWord;
    friend class SWPointer;
    SuperWord*   _slp;
    static int   _depth;
    int _depth_save;
    void print_depth();
    int  depth() const    { return _depth; }
    void set_depth(int d) { _depth = d; }
    void inc_depth()      { _depth++;}
    void dec_depth()      { if (_depth > 0) _depth--;}
    void store_depth()    {_depth_save = _depth;}
    void restore_depth()  {_depth = _depth_save;}

    class Depth {
      friend class Tracer;
      friend class SWPointer;
      friend class SuperWord;
      Depth()  { ++_depth; }
      Depth(int x)  { _depth = 0; }
      ~Depth() { if (_depth > 0) --_depth;}
    };
    Tracer (SuperWord* slp) : _slp(slp) {}

    // tracing functions
    void ctor_1(Node* mem);
    void ctor_2(Node* adr);
    void ctor_3(Node* adr, int i);
    void ctor_4(Node* adr, int i);
    void ctor_5(Node* adr, Node* base,  int i);
    void ctor_6(Node* mem);

    void invariant_1(Node *n, Node *n_c);

    void scaled_iv_plus_offset_1(Node* n);
    void scaled_iv_plus_offset_2(Node* n);
    void scaled_iv_plus_offset_3(Node* n);
    void scaled_iv_plus_offset_4(Node* n);
    void scaled_iv_plus_offset_5(Node* n);
    void scaled_iv_plus_offset_6(Node* n);
    void scaled_iv_plus_offset_7(Node* n);
    void scaled_iv_plus_offset_8(Node* n);

    void scaled_iv_1(Node* n);
    void scaled_iv_2(Node* n, int scale);
    void scaled_iv_3(Node* n, int scale);
    void scaled_iv_4(Node* n, int scale);
    void scaled_iv_5(Node* n, int scale);
    void scaled_iv_6(Node* n, int scale);
    void scaled_iv_7(Node* n);
    void scaled_iv_8(Node* n, SWPointer* tmp);
    void scaled_iv_9(Node* n, int _scale, int _offset, int mult);
    void scaled_iv_10(Node* n);

    void offset_plus_k_1(Node* n);
    void offset_plus_k_2(Node* n, int _offset);
    void offset_plus_k_3(Node* n, int _offset);
    void offset_plus_k_4(Node* n);
    void offset_plus_k_5(Node* n, Node* _invar);
    void offset_plus_k_6(Node* n, Node* _invar, bool _negate_invar, int _offset);
    void offset_plus_k_7(Node* n, Node* _invar, bool _negate_invar, int _offset);
    void offset_plus_k_8(Node* n, Node* _invar, bool _negate_invar, int _offset);
    void offset_plus_k_9(Node* n, Node* _invar, bool _negate_invar, int _offset);
    void offset_plus_k_10(Node* n, Node* _invar, bool _negate_invar, int _offset);
    void offset_plus_k_11(Node* n);

  } _tracer;
#endif
};

#endif // SHARE_VM_OPTO_SUPERWORD_HPP