/*
 * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#ifndef SHARE_VM_OPTO_SUPERWORD_HPP
#define SHARE_VM_OPTO_SUPERWORD_HPP

#include "opto/loopnode.hpp"
#include "opto/node.hpp"
#include "opto/phaseX.hpp"
#include "opto/vectornode.hpp"
#include "utilities/growableArray.hpp"
#include "libadt/dict.hpp"

//
//                  S U P E R W O R D   T R A N S F O R M
//
// SuperWords are short, fixed length vectors.
//
// Algorithm from:
//
// Exploiting SuperWord Level Parallelism with
//   Multimedia Instruction Sets
// by
//   Samuel Larsen and Saman Amarasinghe
//   MIT Laboratory for Computer Science
// date
//   May 2000
// published in
//   ACM SIGPLAN Notices
//   Proceedings of ACM PLDI '00, Volume 35 Issue 5
//
// Definition 3.1 A Pack is an n-tuple, <s1, ...,sn>, where
// s1,...,sn are independent isomorphic statements in a basic
// block.
//
// Definition 3.2 A PackSet is a set of Packs.
//
// Definition 3.3 A Pair is a Pack of size two, where the
// first statement is considered the left element, and the
// second statement is considered the right element.
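//
// For example (illustrative only), in a loop over int arrays the two
// independent, isomorphic statements
//
//   a[i]   = b[i]   + c[i];
//   a[i+1] = b[i+1] + c[i+1];
//
// can form the Pair <s1, s2>; Pairs are later combined into wider Packs,
// which are finally replaced by vector loads, adds and stores.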

class SWPointer;
class OrderedPair;

// ========================= Dependence Graph =====================

class DepMem;

//------------------------------DepEdge---------------------------
// An edge in the dependence graph.  The edges incident to a dependence
// node are threaded through _next_in for incoming edges and _next_out
// for outgoing edges.
class DepEdge : public ResourceObj {
 protected:
  DepMem* _pred;
  DepMem* _succ;
  DepEdge* _next_in;   // list of in edges, null terminated
  DepEdge* _next_out;  // list of out edges, null terminated

 public:
  DepEdge(DepMem* pred, DepMem* succ, DepEdge* next_in, DepEdge* next_out) :
    _pred(pred), _succ(succ), _next_in(next_in), _next_out(next_out) {}

  DepEdge* next_in()  { return _next_in; }
  DepEdge* next_out() { return _next_out; }
  DepMem*  pred()     { return _pred; }
  DepMem*  succ()     { return _succ; }

  void print();
};

//------------------------------DepMem---------------------------
// A node in the dependence graph.  _in_head starts the threaded list of
// incoming edges, and _out_head starts the list of outgoing edges.
class DepMem : public ResourceObj {
 protected:
  Node*    _node;     // Corresponding ideal node
  DepEdge* _in_head;  // Head of list of in edges, null terminated
  DepEdge* _out_head; // Head of list of out edges, null terminated

 public:
  DepMem(Node* node) : _node(node), _in_head(NULL), _out_head(NULL) {}

  Node*    node()                { return _node;     }
  DepEdge* in_head()             { return _in_head;  }
  DepEdge* out_head()            { return _out_head; }
  void set_in_head(DepEdge* hd)  { _in_head = hd;    }
  void set_out_head(DepEdge* hd) { _out_head = hd;   }

  int in_cnt();  // Incoming edge count
  int out_cnt(); // Outgoing edge count

  void print();
};

//------------------------------DepGraph---------------------------
class DepGraph VALUE_OBJ_CLASS_SPEC {
 protected:
  Arena* _arena;
  GrowableArray<DepMem*> _map;
  DepMem* _root;
  DepMem* _tail;

 public:
  DepGraph(Arena* a) : _arena(a), _map(a, 8,  0, NULL) {
    _root = new (_arena) DepMem(NULL);
    _tail = new (_arena) DepMem(NULL);
  }

  DepMem* root() { return _root; }
  DepMem* tail() { return _tail; }

  // Return dependence node corresponding to an ideal node
  DepMem* dep(Node* node) { return _map.at(node->_idx); }

  // Make a new dependence graph node for an ideal node.
  DepMem* make_node(Node* node);

  // Make a new dependence graph edge dpred->dsucc
  DepEdge* make_edge(DepMem* dpred, DepMem* dsucc);

  DepEdge* make_edge(Node* pred,   Node* succ)   { return make_edge(dep(pred), dep(succ)); }
  DepEdge* make_edge(DepMem* pred, Node* succ)   { return make_edge(pred,      dep(succ)); }
  DepEdge* make_edge(Node* pred,   DepMem* succ) { return make_edge(dep(pred), succ);      }

  void init() { _map.clear(); } // initialize

  void print(Node* n)   { dep(n)->print(); }
  void print(DepMem* d) { d->print(); }
};
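
// A sketch of typical DepGraph usage (illustrative only; 'arena', 'load' and
// 'store' are hypothetical names): create a DepMem per ideal node with
// make_node(), then record ordering constraints with make_edge().  SuperWord
// builds its _dg this way in dependence_graph().
//
//   DepGraph dg(arena);
//   dg.make_node(load);
//   dg.make_node(store);
//   dg.make_edge(load, store);   // store must stay after load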

//------------------------------DepPreds---------------------------
// Iterator over predecessors in the dependence graph and
// non-memory-graph inputs of ideal nodes.
class DepPreds : public StackObj {
private:
  Node*    _n;
  int      _next_idx, _end_idx;
  DepEdge* _dep_next;
  Node*    _current;
  bool     _done;

public:
  DepPreds(Node* n, DepGraph& dg);
  Node* current() { return _current; }
  bool  done()    { return _done; }
  void  next();
};

//------------------------------DepSuccs---------------------------
// Iterator over successors in the dependence graph and
// non-memory-graph outputs of ideal nodes.
class DepSuccs : public StackObj {
private:
  Node*    _n;
  int      _next_idx, _end_idx;
  DepEdge* _dep_next;
  Node*    _current;
  bool     _done;

public:
  DepSuccs(Node* n, DepGraph& dg);
  Node* current() { return _current; }
  bool  done()    { return _done; }
  void  next();
};
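
// Both iterators follow a done()/current()/next() protocol; a minimal usage
// sketch (illustrative only, 'n' and 'dg' are assumed to be in scope):
//
//   for (DepPreds preds(n, dg); !preds.done(); preds.next()) {
//     Node* pred = preds.current();  // dependence or non-memory input of n
//     // ... process pred ...
//   }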


// ========================= SuperWord =====================

// -----------------------------SWNodeInfo---------------------------------
// Per node info needed by SuperWord
class SWNodeInfo VALUE_OBJ_CLASS_SPEC {
 public:
  int         _alignment; // memory alignment for a node
  int         _depth;     // Max expression (DAG) depth from block start
  const Type* _velt_type; // vector element type
  Node_List*  _my_pack;   // pack containing this node

  SWNodeInfo() : _alignment(-1), _depth(0), _velt_type(NULL), _my_pack(NULL) {}
  static const SWNodeInfo initial;
};

class SuperWord;
class CMoveKit {
 friend class SuperWord;
 private:
  SuperWord* _sw;
  Dict* _dict;
  CMoveKit(Arena* a, SuperWord* sw) : _sw(sw)  {_dict = new Dict(cmpkey, hashkey, a);}
  void*     _2p(Node* key)        const  { return (void*)(intptr_t)key; } // 2 conversion functions to make gcc happy
  Dict*     dict()                const  { return _dict; }
  void map(Node* key, Node_List* val)    { assert(_dict->operator[](_2p(key)) == NULL, "key existed"); _dict->Insert(_2p(key), (void*)val); }
  void unmap(Node* key)                  { _dict->Delete(_2p(key)); }
  Node_List* pack(Node* key)      const  { return (Node_List*)_dict->operator[](_2p(key)); }
  Node* is_Bool_candidate(Node* nd) const; // if nd is the right candidate, return the corresponding CMove*,
  Node* is_CmpD_candidate(Node* nd) const; // otherwise return NULL
  Node_List* make_cmovevd_pack(Node_List* cmovd_pk);
  bool test_cmpd_pack(Node_List* cmpd_pk, Node_List* cmovd_pk);
}; // class CMoveKit

// JVMCI: OrderedPair is moved up to deal with compilation issues on Windows
//------------------------------OrderedPair---------------------------
// Ordered pair of Node*.
class OrderedPair VALUE_OBJ_CLASS_SPEC {
 protected:
  Node* _p1;
  Node* _p2;
 public:
  OrderedPair() : _p1(NULL), _p2(NULL) {}
  OrderedPair(Node* p1, Node* p2) {
    if (p1->_idx < p2->_idx) {
      _p1 = p1; _p2 = p2;
    } else {
      _p1 = p2; _p2 = p1;
    }
  }

  bool operator==(const OrderedPair &rhs) {
    return _p1 == rhs._p1 && _p2 == rhs._p2;
  }
  void print() { tty->print("  (%d, %d)", _p1->_idx, _p2->_idx); }

  static const OrderedPair initial;
};

// -----------------------------SuperWord---------------------------------
// Transforms scalar operations into packed (superword) operations.
class SuperWord : public ResourceObj {
 friend class SWPointer;
 friend class CMoveKit;
 private:
  PhaseIdealLoop* _phase;
  Arena*          _arena;
  PhaseIterGVN   &_igvn;

  enum consts { top_align = -1, bottom_align = -666 };

  GrowableArray<Node_List*> _packset;    // Packs for the current block

  GrowableArray<int> _bb_idx;            // Map from Node _idx to index within block

  GrowableArray<Node*> _block;           // Nodes in current block
  GrowableArray<Node*> _data_entry;      // Nodes with all inputs from outside
  GrowableArray<Node*> _mem_slice_head;  // Memory slice head nodes
  GrowableArray<Node*> _mem_slice_tail;  // Memory slice tail nodes
  GrowableArray<Node*> _iteration_first; // nodes in the generation that have deps from the phi
  GrowableArray<Node*> _iteration_last;  // nodes in the generation that have deps to the phi
  GrowableArray<SWNodeInfo> _node_info;  // Info needed per node
  CloneMap&            _clone_map;       // map of nodes created in cloning
  CMoveKit             _cmovev_kit;      // support for vectorization of CMov
  MemNode* _align_to_ref;                // Memory reference that pre-loop will align to

  GrowableArray<OrderedPair> _disjoint_ptrs; // runtime disambiguated pointer pairs

  DepGraph _dg; // Dependence graph

  // Scratch pads
  VectorSet    _visited;       // Visited set
  VectorSet    _post_visited;  // Post-visited set
  Node_Stack   _n_idx_list;    // List of (node,index) pairs
  GrowableArray<Node*> _nlist; // List of nodes
  GrowableArray<Node*> _stk;   // Stack of nodes

 public:
  SuperWord(PhaseIdealLoop* phase);

  void transform_loop(IdealLoopTree* lpt, bool do_optimization);

  void unrolling_analysis(int &local_loop_unroll_factor);

  // Accessors for SWPointer
  PhaseIdealLoop* phase()          { return _phase; }
  IdealLoopTree* lpt()             { return _lpt; }
  PhiNode* iv()                    { return _iv; }

  bool early_return()              { return _early_return; }

#ifndef PRODUCT
  bool     is_debug()              { return _vector_loop_debug > 0; }
  bool     is_trace_alignment()    { return (_vector_loop_debug & 2) > 0; }
  bool     is_trace_mem_slice()    { return (_vector_loop_debug & 4) > 0; }
  bool     is_trace_loop()         { return (_vector_loop_debug & 8) > 0; }
  bool     is_trace_adjacent()     { return (_vector_loop_debug & 16) > 0; }
  bool     is_trace_cmov()         { return (_vector_loop_debug & 32) > 0; }
  bool     is_trace_loop_reverse() { return (_vector_loop_debug & 64) > 0; }
#endif
  bool     do_vector_loop()        { return _do_vector_loop; }
  bool     do_reserve_copy()       { return _do_reserve_copy; }
 private:
  IdealLoopTree* _lpt;             // Current loop tree node
  LoopNode*      _lp;              // Current LoopNode
  Node*          _bb;              // Current basic block
  PhiNode*       _iv;              // Induction var
  bool           _race_possible;   // In cases where SDMU is true
  bool           _early_return;    // True if we do not initialize
  bool           _do_vector_loop;  // whether to do vectorization/simd style
  bool           _do_reserve_copy; // do reserve copy of the graph(loop) before final modification in output
  int            _num_work_vecs;   // Number of non memory vector operations
  int            _num_reductions;  // Number of reduction expressions applied
  int            _ii_first;        // generation with direct deps from mem phi
  int            _ii_last;         // generation with direct deps to   mem phi
  GrowableArray<int> _ii_order;
#ifndef PRODUCT
  uintx          _vector_loop_debug; // provide more printing in debug mode
#endif

  // Accessors
  Arena* arena()                   { return _arena; }

  Node* bb()                       { return _bb; }
  void  set_bb(Node* bb)           { _bb = bb; }

  void set_lpt(IdealLoopTree* lpt) { _lpt = lpt; }

  LoopNode* lp()                   { return _lp; }
  void      set_lp(LoopNode* lp)   { _lp = lp;
                                     _iv = lp->as_CountedLoop()->phi()->as_Phi(); }
  int      iv_stride()             { return lp()->as_CountedLoop()->stride_con(); }

  int vector_width(Node* n) {
    BasicType bt = velt_basic_type(n);
    return MIN2(ABS(iv_stride()), Matcher::max_vector_size(bt));
  }
  int vector_width_in_bytes(Node* n) {
    BasicType bt = velt_basic_type(n);
    return vector_width(n)*type2aelembytes(bt);
  }
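  // Worked example (illustrative; assumes Matcher::max_vector_size(T_INT) == 8
  // on the target): for an int expression in a loop with |iv_stride()| == 4,
  // vector_width() == MIN2(4, 8) == 4 lanes and
  // vector_width_in_bytes() == 4 * type2aelembytes(T_INT) == 4 * 4 == 16 bytes.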
  MemNode* align_to_ref()            { return _align_to_ref; }
  void  set_align_to_ref(MemNode* m) { _align_to_ref = m; }

  Node* ctrl(Node* n) const { return _phase->has_ctrl(n) ? _phase->get_ctrl(n) : n; }

  // block accessors
  bool in_bb(Node* n)      { return n != NULL && n->outcnt() > 0 && ctrl(n) == _bb; }
  int  bb_idx(Node* n)     { assert(in_bb(n), "must be"); return _bb_idx.at(n->_idx); }
  void set_bb_idx(Node* n, int i) { _bb_idx.at_put_grow(n->_idx, i); }

  // visited set accessors
  void visited_clear()           { _visited.Clear(); }
  void visited_set(Node* n)      { return _visited.set(bb_idx(n)); }
  int visited_test(Node* n)      { return _visited.test(bb_idx(n)); }
  int visited_test_set(Node* n)  { return _visited.test_set(bb_idx(n)); }
  void post_visited_clear()      { _post_visited.Clear(); }
  void post_visited_set(Node* n) { return _post_visited.set(bb_idx(n)); }
  int post_visited_test(Node* n) { return _post_visited.test(bb_idx(n)); }

  // Ensure node_info contains element "i"
  void grow_node_info(int i) { if (i >= _node_info.length()) _node_info.at_put_grow(i, SWNodeInfo::initial); }

  // memory alignment for a node
  int alignment(Node* n)                     { return _node_info.adr_at(bb_idx(n))->_alignment; }
  void set_alignment(Node* n, int a)         { int i = bb_idx(n); grow_node_info(i); _node_info.adr_at(i)->_alignment = a; }

  // Max expression (DAG) depth from beginning of the block for each node
  int depth(Node* n)                         { return _node_info.adr_at(bb_idx(n))->_depth; }
  void set_depth(Node* n, int d)             { int i = bb_idx(n); grow_node_info(i); _node_info.adr_at(i)->_depth = d; }

  // vector element type
  const Type* velt_type(Node* n)             { return _node_info.adr_at(bb_idx(n))->_velt_type; }
  BasicType velt_basic_type(Node* n)         { return velt_type(n)->array_element_basic_type(); }
  void set_velt_type(Node* n, const Type* t) { int i = bb_idx(n); grow_node_info(i); _node_info.adr_at(i)->_velt_type = t; }
  bool same_velt_type(Node* n1, Node* n2);

  // my_pack
  Node_List* my_pack(Node* n)                 { return !in_bb(n) ? NULL : _node_info.adr_at(bb_idx(n))->_my_pack; }
  void set_my_pack(Node* n, Node_List* p)     { int i = bb_idx(n); grow_node_info(i); _node_info.adr_at(i)->_my_pack = p; }
  // Is the pack suitable for converting into one vector node, replacing the pack's Cmp, Bool and CMove nodes?
  bool is_cmov_pack(Node_List* p);
  bool is_cmov_pack_internal_node(Node_List* p, Node* nd) { return is_cmov_pack(p) && !nd->is_CMove(); }
  // For pack p, are all idx operands the same?
  bool same_inputs(Node_List* p, int idx);
  // CloneMap utilities
  bool same_origin_idx(Node* a, Node* b) const;
  bool same_generation(Node* a, Node* b) const;

  // methods

  // Extract the superword level parallelism
  void SLP_extract();
  // Find the adjacent memory references and create pack pairs for them.
  void find_adjacent_refs();
  // Tracing support
  #ifndef PRODUCT
  void find_adjacent_refs_trace_1(Node* best_align_to_mem_ref, int best_iv_adjustment);
  void print_loop(bool whole);
  #endif
  // Find a memory reference to align the loop induction variable to.
  MemNode* find_align_to_ref(Node_List &memops);
  // Calculate the loop's iv adjustment for this memory reference.
  int get_iv_adjustment(MemNode* mem);
  // Can the preloop align the reference to position zero in the vector?
  bool ref_is_alignable(SWPointer& p);
  // Rebuild the graph so that all loads in different iterations of the cloned loop become dependent on the phi node (in _do_vector_loop only)
  bool hoist_loads_in_graph();
  // Test whether the MemNode::Memory dependency to the same load, but in the first iteration of this loop, comes from the memory phi.
  // Return NULL if not found.
  Node* find_phi_for_mem_dep(LoadNode* ld);
  // Return the same node but from the first generation. Return NULL if not found.
  Node* first_node(Node* nd);
  // Return the same node but from the last generation. Return NULL if not found.
  Node* last_node(Node* n);
  // Mark nodes belonging to first and last generation
  // Returns first generation index or -1 if vectorization/simd is impossible
  int mark_generations();
  // Swap inputs of a commutative instruction (Add or Mul)
  bool fix_commutative_inputs(Node* gold, Node* fix);
  // Make packs forcefully (in _do_vector_loop only)
  bool pack_parallel();
  // Construct dependency graph.
  void dependence_graph();
  // Return a memory slice (node list) in predecessor order starting at "start"
  void mem_slice_preds(Node* start, Node* stop, GrowableArray<Node*> &preds);
  // Can s1 and s2 be in a pack with s1 immediately preceding s2 and s1 aligned at "align"?
  bool stmts_can_pack(Node* s1, Node* s2, int align);
  // Does s exist in a pack at position pos?
  bool exists_at(Node* s, uint pos);
  // Is s1 immediately before s2 in memory?
  bool are_adjacent_refs(Node* s1, Node* s2);
  // Are s1 and s2 similar?
  bool isomorphic(Node* s1, Node* s2);
  // Is there no data path from s1 to s2 or s2 to s1?
  bool independent(Node* s1, Node* s2);
  // Is there a data path between s1 and s2 and both are reductions?
  bool reduction(Node* s1, Node* s2);
  // Helper for independent
  bool independent_path(Node* shallow, Node* deep, uint dp=0);
  void set_alignment(Node* s1, Node* s2, int align);
  int data_size(Node* s);
  // Extend packset by following use->def and def->use links from pack members.
  void extend_packlist();
  // Extend the packset by visiting operand definitions of nodes in pack p
  bool follow_use_defs(Node_List* p);
  // Extend the packset by visiting uses of nodes in pack p
  bool follow_def_uses(Node_List* p);
  // For extended packsets, ordinally arrange uses packset by major component
  void order_def_uses(Node_List* p);
  // Estimate the savings from executing s1 and s2 as a pack
  int est_savings(Node* s1, Node* s2);
  int adjacent_profit(Node* s1, Node* s2);
  int pack_cost(int ct);
  int unpack_cost(int ct);
  // Combine packs A and B with A.last == B.first into A.first..,A.last,B.second,..B.last
  void combine_packs();
  // Construct the map from nodes to packs.
  void construct_my_pack_map();
  // Remove packs that are not implemented or not profitable.
  void filter_packs();
  // Merge CMoveD into new vector-nodes
  void merge_packs_to_cmovd();
  // Adjust the memory graph for the packed operations
  void schedule();
  // Remove "current" from its current position in the memory graph and insert
  // it after the appropriate insert points (lip or uip).
  void remove_and_insert(MemNode *current, MemNode *prev, MemNode *lip, Node *uip, Unique_Node_List &schd_before);
  // Within a store pack, schedule stores together by moving out the sandwiched memory ops according
  // to dependence info; and within a load pack, move loads down to the last executed load.
  void co_locate_pack(Node_List* p);
  // Convert packs into vector node operations
  void output();
  // Create a vector operand for the nodes in pack p for operand: in(opd_idx)
  Node* vector_opd(Node_List* p, int opd_idx);
  // Can code be generated for pack p?
  bool implemented(Node_List* p);
  // For pack p, are all operands and all uses (within the block) vector?
  bool profitable(Node_List* p);
  // If a use of pack p is not a vector use, then replace the use with an extract operation.
  void insert_extracts(Node_List* p);
  // Is use->in(u_idx) a vector use?
  bool is_vector_use(Node* use, int u_idx);
  // Construct reverse postorder list of block members
  bool construct_bb();
  // Initialize per node info
  void initialize_bb();
  // Insert n into block after pos
  void bb_insert_after(Node* n, int pos);
  // Compute max depth for expressions from beginning of block
  void compute_max_depth();
  // Compute necessary vector element type for expressions
  void compute_vector_element_type();
  // Are s1 and s2 in a pack pair and ordered as s1,s2?
  bool in_packset(Node* s1, Node* s2);
  // Is s in pack p?
  Node_List* in_pack(Node* s, Node_List* p);
  // Remove the pack at position pos in the packset
  void remove_pack_at(int pos);
  // Return the node executed first in pack p.
  Node* executed_first(Node_List* p);
  // Return the node executed last in pack p.
  Node* executed_last(Node_List* p);
  static LoadNode::ControlDependency control_dependency(Node_List* p);
  // Alignment within a vector memory reference
  int memory_alignment(MemNode* s, int iv_adjust);
  // (Start, end] half-open range defining which operands are vector
  void vector_opd_range(Node* n, uint* start, uint* end);
  // Smallest type containing range of values
  const Type* container_type(Node* n);
  // Adjust pre-loop limit so that in main loop, a load/store reference
  // to align_to_ref will be at position zero in the vector.
  void align_initial_loop_index(MemNode* align_to_ref);
  // Find pre loop end from main loop.  Returns null if none.
  CountedLoopEndNode* get_pre_loop_end(CountedLoopNode *cl);
  // Is the use of d1 in u1 at the same operand position as d2 in u2?
  bool opnd_positions_match(Node* d1, Node* u1, Node* d2, Node* u2);
  void init();
  // Clean up some basic structures - used if the ideal graph was rebuilt
  void restart();

  // print methods
  void print_packset();
  void print_pack(Node_List* p);
  void print_bb();
  void print_stmt(Node* s);
  char* blank(uint depth);

  void packset_sort(int n);
};



//------------------------------SWPointer---------------------------
// Information about an address for dependence checking and vector alignment
class SWPointer VALUE_OBJ_CLASS_SPEC {
 protected:
  MemNode*   _mem;           // My memory reference node
  SuperWord* _slp;           // SuperWord class

  Node* _base;               // NULL if unsafe nonheap reference
  Node* _adr;                // address pointer
  jint  _scale;              // multiplier for iv (in bytes), 0 if no loop iv
  jint  _offset;             // constant offset (in bytes)
  Node* _invar;              // invariant offset (in bytes), NULL if none
  bool  _negate_invar;       // if true then use: (0 - _invar)
  Node_Stack* _nstack;       // stack used to record a swpointer trace of variants
  bool        _analyze_only; // Used in loop unrolling only for swpointer trace
  uint        _stack_idx;    // Used in loop unrolling only for swpointer trace

  PhaseIdealLoop* phase() { return _slp->phase(); }
  IdealLoopTree*  lpt()   { return _slp->lpt(); }
  PhiNode*        iv()    { return _slp->iv();  } // Induction var

  bool invariant(Node* n);

  // Match: k*iv + offset
  bool scaled_iv_plus_offset(Node* n);
  // Match: k*iv where k is a constant that's not zero
  bool scaled_iv(Node* n);
  // Match: offset is (k [+/- invariant])
  bool offset_plus_k(Node* n, bool negate = false);
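
  // Example (illustrative only): for an int-array access a[i + inv + 3], where
  // i is the loop iv and inv is loop-invariant, a successful match records
  // _scale == 4 (the element size in bytes), _invar == the node computing the
  // invariant part of the offset (in bytes), and _offset == 3*4 plus the array
  // base offset, also in bytes.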

 public:
  enum CMP {
    Less          = 1,
    Greater       = 2,
    Equal         = 4,
    NotEqual      = (Less | Greater),
    NotComparable = (Less | Greater | Equal)
  };

  SWPointer(MemNode* mem, SuperWord* slp, Node_Stack *nstack, bool analyze_only);
  // Following is used to create a temporary object during
  // the pattern match of an address expression.
  SWPointer(SWPointer* p);

  bool valid()  { return _adr != NULL; }
  bool has_iv() { return _scale != 0; }

  Node* base()             { return _base; }
  Node* adr()              { return _adr; }
  MemNode* mem()           { return _mem; }
  int   scale_in_bytes()   { return _scale; }
  Node* invar()            { return _invar; }
  bool  negate_invar()     { return _negate_invar; }
  int   offset_in_bytes()  { return _offset; }
  int   memory_size()      { return _mem->memory_size(); }
  Node_Stack* node_stack() { return _nstack; }

  // Comparable?
  int cmp(SWPointer& q) {
    if (valid() && q.valid() &&
        (_adr == q._adr || (_base == _adr && q._base == q._adr)) &&
        _scale == q._scale   &&
        _invar == q._invar   &&
        _negate_invar == q._negate_invar) {
      bool overlap = q._offset <   _offset +   memory_size() &&
                       _offset < q._offset + q.memory_size();
      return overlap ? Equal : (_offset < q._offset ? Less : Greater);
    } else {
      return NotComparable;
    }
  }

  bool not_equal(SWPointer& q)    { return not_equal(cmp(q)); }
  bool equal(SWPointer& q)        { return equal(cmp(q)); }
  bool comparable(SWPointer& q)   { return comparable(cmp(q)); }
  static bool not_equal(int cmp)  { return cmp <= NotEqual; }
  static bool equal(int cmp)      { return cmp == Equal; }
  static bool comparable(int cmp) { return cmp < NotComparable; }
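  // Worked example (illustrative only): two valid SWPointers into the same int
  // array with identical base, scale and invariant, at _offset 0 and 4 and
  // memory_size() == 4, do not overlap (4 < 0 + 4 is false), so cmp() returns
  // Less; the static helpers then give comparable() == true, equal() == false
  // and not_equal() == true.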

  void print();

#ifndef PRODUCT
  class Tracer {
    friend class SuperWord;
    friend class SWPointer;
    SuperWord*   _slp;
    static int   _depth;
    int _depth_save;
    void print_depth();
    int  depth() const    { return _depth; }
    void set_depth(int d) { _depth = d; }
    void inc_depth()      { _depth++;}
    void dec_depth()      { if (_depth > 0) _depth--;}
    void store_depth()    {_depth_save = _depth;}
    void restore_depth()  {_depth = _depth_save;}

    class Depth {
      friend class Tracer;
      friend class SWPointer;
      friend class SuperWord;
      Depth()  { ++_depth; }
      Depth(int x)  { _depth = 0; }
      ~Depth() { if (_depth > 0) --_depth;}
    };
    Tracer (SuperWord* slp) : _slp(slp) {}

    // tracing functions
    void ctor_1(Node* mem);
    void ctor_2(Node* adr);
    void ctor_3(Node* adr, int i);
    void ctor_4(Node* adr, int i);
    void ctor_5(Node* adr, Node* base,  int i);
    void ctor_6(Node* mem);

    void invariant_1(Node *n, Node *n_c);

    void scaled_iv_plus_offset_1(Node* n);
    void scaled_iv_plus_offset_2(Node* n);
    void scaled_iv_plus_offset_3(Node* n);
    void scaled_iv_plus_offset_4(Node* n);
    void scaled_iv_plus_offset_5(Node* n);
    void scaled_iv_plus_offset_6(Node* n);
    void scaled_iv_plus_offset_7(Node* n);
    void scaled_iv_plus_offset_8(Node* n);

    void scaled_iv_1(Node* n);
    void scaled_iv_2(Node* n, int scale);
    void scaled_iv_3(Node* n, int scale);
    void scaled_iv_4(Node* n, int scale);
    void scaled_iv_5(Node* n, int scale);
    void scaled_iv_6(Node* n, int scale);
    void scaled_iv_7(Node* n);
    void scaled_iv_8(Node* n, SWPointer* tmp);
    void scaled_iv_9(Node* n, int _scale, int _offset, int mult);
    void scaled_iv_10(Node* n);

    void offset_plus_k_1(Node* n);
    void offset_plus_k_2(Node* n, int _offset);
    void offset_plus_k_3(Node* n, int _offset);
    void offset_plus_k_4(Node* n);
    void offset_plus_k_5(Node* n, Node* _invar);
    void offset_plus_k_6(Node* n, Node* _invar, bool _negate_invar, int _offset);
    void offset_plus_k_7(Node* n, Node* _invar, bool _negate_invar, int _offset);
    void offset_plus_k_8(Node* n, Node* _invar, bool _negate_invar, int _offset);
    void offset_plus_k_9(Node* n, Node* _invar, bool _negate_invar, int _offset);
    void offset_plus_k_10(Node* n, Node* _invar, bool _negate_invar, int _offset);
    void offset_plus_k_11(Node* n);

  } _tracer;
#endif
};

#endif // SHARE_VM_OPTO_SUPERWORD_HPP