1 /* 2 * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 */ 23 24 #ifndef SHARE_VM_OPTO_SUPERWORD_HPP 25 #define SHARE_VM_OPTO_SUPERWORD_HPP 26 27 #include "opto/loopnode.hpp" 28 #include "opto/node.hpp" 29 #include "opto/phaseX.hpp" 30 #include "opto/vectornode.hpp" 31 #include "utilities/growableArray.hpp" 32 #include "libadt/dict.hpp" 33 34 // 35 // S U P E R W O R D T R A N S F O R M 36 // 37 // SuperWords are short, fixed length vectors. 38 // 39 // Algorithm from: 40 // 41 // Exploiting SuperWord Level Parallelism with 42 // Multimedia Instruction Sets 43 // by 44 // Samuel Larsen and Saman Amarasinghe 45 // MIT Laboratory for Computer Science 46 // date 47 // May 2000 48 // published in 49 // ACM SIGPLAN Notices 50 // Proceedings of ACM PLDI '00, Volume 35 Issue 5 51 // 52 // Definition 3.1 A Pack is an n-tuple, <s1, ...,sn>, where 53 // s1,...,sn are independent isomorphic statements in a basic 54 // block. 
//
// Definition 3.2 A PackSet is a set of Packs.
//
// Definition 3.3 A Pair is a Pack of size two, where the
// first statement is considered the left element, and the
// second statement is considered the right element.

class SWPointer;
class OrderedPair;

// ========================= Dependence Graph =====================

class DepMem;

//------------------------------DepEdge---------------------------
// An edge in the dependence graph.  The edges incident to a dependence
// node are threaded through _next_in for incoming edges and _next_out
// for outgoing edges.
class DepEdge : public ResourceObj {
 protected:
  DepMem*  _pred;      // Source node of this dependence
  DepMem*  _succ;      // Sink node of this dependence
  DepEdge* _next_in;   // list of in edges, null terminated
  DepEdge* _next_out;  // list of out edges, null terminated

 public:
  DepEdge(DepMem* pred, DepMem* succ, DepEdge* next_in, DepEdge* next_out) :
    _pred(pred), _succ(succ), _next_in(next_in), _next_out(next_out) {}

  // Accessors (threaded-list links and endpoints)
  DepEdge* next_in()  { return _next_in; }
  DepEdge* next_out() { return _next_out; }
  DepMem*  pred()     { return _pred; }
  DepMem*  succ()     { return _succ; }

  void print();
};

//------------------------------DepMem---------------------------
// A node in the dependence graph.  _in_head starts the threaded list of
// incoming edges, and _out_head starts the list of outgoing edges.
class DepMem : public ResourceObj {
 protected:
  Node*    _node;     // Corresponding ideal node
  DepEdge* _in_head;  // Head of list of in edges, null terminated
  DepEdge* _out_head; // Head of list of out edges, null terminated

 public:
  DepMem(Node* node) : _node(node), _in_head(NULL), _out_head(NULL) {}

  // Accessors
  Node*    node()                { return _node; }
  DepEdge* in_head()             { return _in_head; }
  DepEdge* out_head()            { return _out_head; }
  void set_in_head(DepEdge* hd)  { _in_head = hd; }
  void set_out_head(DepEdge* hd) { _out_head = hd; }

  int in_cnt();  // Incoming edge count
  int out_cnt(); // Outgoing edge count

  void print();
};

//------------------------------DepGraph---------------------------
// Dependence graph over ideal nodes.  Allocated in a resource Arena;
// _root and _tail are sentinel DepMems not tied to any ideal node.
class DepGraph VALUE_OBJ_CLASS_SPEC {
 protected:
  Arena* _arena;
  GrowableArray<DepMem*> _map; // Map from ideal node _idx to its DepMem
  DepMem* _root;               // Sentinel node (no ideal node)
  DepMem* _tail;               // Sentinel node (no ideal node)

 public:
  DepGraph(Arena* a) : _arena(a), _map(a, 8, 0, NULL) {
    _root = new (_arena) DepMem(NULL);
    _tail = new (_arena) DepMem(NULL);
  }

  DepMem* root() { return _root; }
  DepMem* tail() { return _tail; }

  // Return dependence node corresponding to an ideal node
  DepMem* dep(Node* node) { return _map.at(node->_idx); }

  // Make a new dependence graph node for an ideal node.
  DepMem* make_node(Node* node);

  // Make a new dependence graph edge dpred->dsucc
  DepEdge* make_edge(DepMem* dpred, DepMem* dsucc);

  // Convenience overloads that first look up the DepMem for ideal nodes.
  DepEdge* make_edge(Node* pred,   Node* succ)   { return make_edge(dep(pred), dep(succ)); }
  DepEdge* make_edge(DepMem* pred, Node* succ)   { return make_edge(pred,      dep(succ)); }
  DepEdge* make_edge(Node* pred,   DepMem* succ) { return make_edge(dep(pred), succ); }

  void init() { _map.clear(); } // initialize

  void print(Node* n)   { dep(n)->print(); }
  void print(DepMem* d) { d->print(); }
};

//------------------------------DepPreds---------------------------
// Iterator over predecessors in the dependence graph and
// non-memory-graph inputs of ideal nodes.
class DepPreds : public StackObj {
 private:
  Node*    _n;                  // Node whose predecessors are iterated
  int      _next_idx, _end_idx; // Range of ideal inputs still to visit
  DepEdge* _dep_next;           // Next dependence-graph in-edge
  Node*    _current;            // Current predecessor
  bool     _done;               // True when iteration is exhausted

 public:
  DepPreds(Node* n, DepGraph& dg);
  Node* current() { return _current; }
  bool  done()    { return _done; }
  void  next();
};

//------------------------------DepSuccs---------------------------
// Iterator over successors in the dependence graph and
// non-memory-graph outputs of ideal nodes.
class DepSuccs : public StackObj {
 private:
  Node*    _n;                  // Node whose successors are iterated
  int      _next_idx, _end_idx; // Range of ideal outputs still to visit
  DepEdge* _dep_next;           // Next dependence-graph out-edge
  Node*    _current;            // Current successor
  bool     _done;               // True when iteration is exhausted

 public:
  DepSuccs(Node* n, DepGraph& dg);
  Node* current() { return _current; }
  bool  done()    { return _done; }
  void  next();
};


// ========================= SuperWord =====================

// -----------------------------SWNodeInfo---------------------------------
// Per node info needed by SuperWord
class SWNodeInfo VALUE_OBJ_CLASS_SPEC {
 public:
  int         _alignment; // memory alignment for a node
  int         _depth;     // Max expression (DAG) depth from block start
  const Type* _velt_type; // vector element type
  Node_List*  _my_pack;   // pack containing this node

  SWNodeInfo() : _alignment(-1), _depth(0), _velt_type(NULL), _my_pack(NULL) {}
  static const SWNodeInfo initial;
};

class SuperWord;

//------------------------------CMoveKit---------------------------
// Bookkeeping used by SuperWord when vectorizing CMove patterns:
// maps a node to the Node_List pack it belongs to via an arena Dict.
class CMoveKit {
 friend class SuperWord;
 private:
  SuperWord* _sw;
  Dict*      _dict; // node -> pack map
  CMoveKit(Arena* a, SuperWord* sw) : _sw(sw) { _dict = new Dict(cmpkey, hashkey, a); }
  void* _2p(Node* key) const { return (void*)(intptr_t)key; } // 2 conversion functions to make gcc happy
  Dict* dict() const { return _dict; }
  // Insert key->val; asserts the key was not already mapped.
  void map(Node* key, Node_List* val) { assert(_dict->operator[](_2p(key)) == NULL, "key existed"); _dict->Insert(_2p(key), (void*)val); }
  void unmap(Node* key) { _dict->Delete(_2p(key)); }
  Node_List* pack(Node* key) const { return (Node_List*)_dict->operator[](_2p(key)); }
  Node* is_Bool_candidate(Node* nd) const; // if it is the right candidate return corresponding CMove* ,
  Node* is_CmpD_candidate(Node* nd) const; // otherwise return NULL
  Node_List* make_cmovevd_pack(Node_List* cmovd_pk);
  bool test_cmpd_pack(Node_List* cmpd_pk, Node_List* cmovd_pk);
};//class CMoveKit

// JVMCI: OrderedPair is moved up to deal with compilation issues on Windows
//------------------------------OrderedPair--------------------------- 224 // Ordered pair of Node*. 225 class OrderedPair VALUE_OBJ_CLASS_SPEC { 226 protected: 227 Node* _p1; 228 Node* _p2; 229 public: 230 OrderedPair() : _p1(NULL), _p2(NULL) {} 231 OrderedPair(Node* p1, Node* p2) { 232 if (p1->_idx < p2->_idx) { 233 _p1 = p1; _p2 = p2; 234 } else { 235 _p1 = p2; _p2 = p1; 236 } 237 } 238 239 bool operator==(const OrderedPair &rhs) { 240 return _p1 == rhs._p1 && _p2 == rhs._p2; 241 } 242 void print() { tty->print(" (%d, %d)", _p1->_idx, _p2->_idx); } 243 244 static const OrderedPair initial; 245 }; 246 247 // -----------------------------SuperWord--------------------------------- 248 // Transforms scalar operations into packed (superword) operations. 249 class SuperWord : public ResourceObj { 250 friend class SWPointer; 251 friend class CMoveKit; 252 private: 253 PhaseIdealLoop* _phase; 254 Arena* _arena; 255 PhaseIterGVN &_igvn; 256 257 enum consts { top_align = -1, bottom_align = -666 }; 258 259 GrowableArray<Node_List*> _packset; // Packs for the current block 260 261 GrowableArray<int> _bb_idx; // Map from Node _idx to index within block 262 263 GrowableArray<Node*> _block; // Nodes in current block 264 GrowableArray<Node*> _post_block; // Nodes in post loop block 265 GrowableArray<Node*> _data_entry; // Nodes with all inputs from outside 266 GrowableArray<Node*> _mem_slice_head; // Memory slice head nodes 267 GrowableArray<Node*> _mem_slice_tail; // Memory slice tail nodes 268 GrowableArray<Node*> _iteration_first; // nodes in the generation that has deps from phi 269 GrowableArray<Node*> _iteration_last; // nodes in the generation that has deps to phi 270 GrowableArray<SWNodeInfo> _node_info; // Info needed per node 271 CloneMap& _clone_map; // map of nodes created in cloning 272 CMoveKit _cmovev_kit; // support for vectorization of CMov 273 MemNode* _align_to_ref; // Memory reference that pre-loop will align to 274 275 GrowableArray<OrderedPair> 
_disjoint_ptrs; // runtime disambiguated pointer pairs 276 277 DepGraph _dg; // Dependence graph 278 279 // Scratch pads 280 VectorSet _visited; // Visited set 281 VectorSet _post_visited; // Post-visited set 282 Node_Stack _n_idx_list; // List of (node,index) pairs 283 GrowableArray<Node*> _nlist; // List of nodes 284 GrowableArray<Node*> _stk; // Stack of nodes 285 286 public: 287 SuperWord(PhaseIdealLoop* phase); 288 289 void transform_loop(IdealLoopTree* lpt, bool do_optimization); 290 291 void unrolling_analysis(int &local_loop_unroll_factor); 292 293 // Accessors for SWPointer 294 PhaseIdealLoop* phase() { return _phase; } 295 IdealLoopTree* lpt() { return _lpt; } 296 PhiNode* iv() { return _iv; } 297 298 bool early_return() { return _early_return; } 299 300 #ifndef PRODUCT 301 bool is_debug() { return _vector_loop_debug > 0; } 302 bool is_trace_alignment() { return (_vector_loop_debug & 2) > 0; } 303 bool is_trace_mem_slice() { return (_vector_loop_debug & 4) > 0; } 304 bool is_trace_loop() { return (_vector_loop_debug & 8) > 0; } 305 bool is_trace_adjacent() { return (_vector_loop_debug & 16) > 0; } 306 bool is_trace_cmov() { return (_vector_loop_debug & 32) > 0; } 307 bool is_trace_loop_reverse() { return (_vector_loop_debug & 64) > 0; } 308 #endif 309 bool do_vector_loop() { return _do_vector_loop; } 310 bool do_reserve_copy() { return _do_reserve_copy; } 311 private: 312 IdealLoopTree* _lpt; // Current loop tree node 313 LoopNode* _lp; // Current LoopNode 314 Node* _bb; // Current basic block 315 PhiNode* _iv; // Induction var 316 bool _race_possible; // In cases where SDMU is true 317 bool _early_return; // True if we do not initialize 318 bool _do_vector_loop; // whether to do vectorization/simd style 319 bool _do_reserve_copy; // do reserve copy of the graph(loop) before final modification in output 320 int _num_work_vecs; // Number of non memory vector operations 321 int _num_reductions; // Number of reduction expressions applied 322 int _ii_first; 
// generation with direct deps from mem phi 323 int _ii_last; // generation with direct deps to mem phi 324 GrowableArray<int> _ii_order; 325 #ifndef PRODUCT 326 uintx _vector_loop_debug; // provide more printing in debug mode 327 #endif 328 329 // Accessors 330 Arena* arena() { return _arena; } 331 332 Node* bb() { return _bb; } 333 void set_bb(Node* bb) { _bb = bb; } 334 335 void set_lpt(IdealLoopTree* lpt) { _lpt = lpt; } 336 337 LoopNode* lp() { return _lp; } 338 void set_lp(LoopNode* lp) { _lp = lp; 339 _iv = lp->as_CountedLoop()->phi()->as_Phi(); } 340 int iv_stride() { return lp()->as_CountedLoop()->stride_con(); } 341 342 int vector_width(Node* n) { 343 BasicType bt = velt_basic_type(n); 344 return MIN2(ABS(iv_stride()), Matcher::max_vector_size(bt)); 345 } 346 int vector_width_in_bytes(Node* n) { 347 BasicType bt = velt_basic_type(n); 348 return vector_width(n)*type2aelembytes(bt); 349 } 350 MemNode* align_to_ref() { return _align_to_ref; } 351 void set_align_to_ref(MemNode* m) { _align_to_ref = m; } 352 353 Node* ctrl(Node* n) const { return _phase->has_ctrl(n) ? 
_phase->get_ctrl(n) : n; } 354 355 // block accessors 356 bool in_bb(Node* n) { return n != NULL && n->outcnt() > 0 && ctrl(n) == _bb; } 357 int bb_idx(Node* n) { assert(in_bb(n), "must be"); return _bb_idx.at(n->_idx); } 358 void set_bb_idx(Node* n, int i) { _bb_idx.at_put_grow(n->_idx, i); } 359 360 // visited set accessors 361 void visited_clear() { _visited.Clear(); } 362 void visited_set(Node* n) { return _visited.set(bb_idx(n)); } 363 int visited_test(Node* n) { return _visited.test(bb_idx(n)); } 364 int visited_test_set(Node* n) { return _visited.test_set(bb_idx(n)); } 365 void post_visited_clear() { _post_visited.Clear(); } 366 void post_visited_set(Node* n) { return _post_visited.set(bb_idx(n)); } 367 int post_visited_test(Node* n) { return _post_visited.test(bb_idx(n)); } 368 369 // Ensure node_info contains element "i" 370 void grow_node_info(int i) { if (i >= _node_info.length()) _node_info.at_put_grow(i, SWNodeInfo::initial); } 371 372 // memory alignment for a node 373 int alignment(Node* n) { return _node_info.adr_at(bb_idx(n))->_alignment; } 374 void set_alignment(Node* n, int a) { int i = bb_idx(n); grow_node_info(i); _node_info.adr_at(i)->_alignment = a; } 375 376 // Max expression (DAG) depth from beginning of the block for each node 377 int depth(Node* n) { return _node_info.adr_at(bb_idx(n))->_depth; } 378 void set_depth(Node* n, int d) { int i = bb_idx(n); grow_node_info(i); _node_info.adr_at(i)->_depth = d; } 379 380 // vector element type 381 const Type* velt_type(Node* n) { return _node_info.adr_at(bb_idx(n))->_velt_type; } 382 BasicType velt_basic_type(Node* n) { return velt_type(n)->array_element_basic_type(); } 383 void set_velt_type(Node* n, const Type* t) { int i = bb_idx(n); grow_node_info(i); _node_info.adr_at(i)->_velt_type = t; } 384 bool same_velt_type(Node* n1, Node* n2); 385 386 // my_pack 387 Node_List* my_pack(Node* n) { return !in_bb(n) ? 
NULL : _node_info.adr_at(bb_idx(n))->_my_pack; } 388 void set_my_pack(Node* n, Node_List* p) { int i = bb_idx(n); grow_node_info(i); _node_info.adr_at(i)->_my_pack = p; } 389 // is pack good for converting into one vector node replacing 12 nodes of Cmp, Bool, CMov 390 bool is_cmov_pack(Node_List* p); 391 bool is_cmov_pack_internal_node(Node_List* p, Node* nd) { return is_cmov_pack(p) && !nd->is_CMove(); } 392 // For pack p, are all idx operands the same? 393 bool same_inputs(Node_List* p, int idx); 394 // CloneMap utilities 395 bool same_origin_idx(Node* a, Node* b) const; 396 bool same_generation(Node* a, Node* b) const; 397 398 // methods 399 400 // Extract the superword level parallelism 401 void SLP_extract(); 402 // Find the adjacent memory references and create pack pairs for them. 403 void find_adjacent_refs(); 404 // Tracing support 405 #ifndef PRODUCT 406 void find_adjacent_refs_trace_1(Node* best_align_to_mem_ref, int best_iv_adjustment); 407 void print_loop(bool whole); 408 #endif 409 // Find a memory reference to align the loop induction variable to. 410 MemNode* find_align_to_ref(Node_List &memops); 411 // Calculate loop's iv adjustment for this memory ops. 412 int get_iv_adjustment(MemNode* mem); 413 // Can the preloop align the reference to position zero in the vector? 414 bool ref_is_alignable(SWPointer& p); 415 // rebuild the graph so all loads in different iterations of cloned loop become dependant on phi node (in _do_vector_loop only) 416 bool hoist_loads_in_graph(); 417 // Test whether MemNode::Memory dependency to the same load but in the first iteration of this loop is coming from memory phi 418 // Return false if failed 419 Node* find_phi_for_mem_dep(LoadNode* ld); 420 // Return same node but from the first generation. Return 0, if not found 421 Node* first_node(Node* nd); 422 // Return same node as this but from the last generation. 
Return 0, if not found 423 Node* last_node(Node* n); 424 // Mark nodes belonging to first and last generation 425 // returns first generation index or -1 if vectorization/simd is impossible 426 int mark_generations(); 427 // swapping inputs of commutative instruction (Add or Mul) 428 bool fix_commutative_inputs(Node* gold, Node* fix); 429 // make packs forcefully (in _do_vector_loop only) 430 bool pack_parallel(); 431 // Construct dependency graph. 432 void dependence_graph(); 433 // Return a memory slice (node list) in predecessor order starting at "start" 434 void mem_slice_preds(Node* start, Node* stop, GrowableArray<Node*> &preds); 435 // Can s1 and s2 be in a pack with s1 immediately preceding s2 and s1 aligned at "align" 436 bool stmts_can_pack(Node* s1, Node* s2, int align); 437 // Does s exist in a pack at position pos? 438 bool exists_at(Node* s, uint pos); 439 // Is s1 immediately before s2 in memory? 440 bool are_adjacent_refs(Node* s1, Node* s2); 441 // Are s1 and s2 similar? 442 bool isomorphic(Node* s1, Node* s2); 443 // Is there no data path from s1 to s2 or s2 to s1? 444 bool independent(Node* s1, Node* s2); 445 // Is there a data path between s1 and s2 and both are reductions? 446 bool reduction(Node* s1, Node* s2); 447 // Helper for independent 448 bool independent_path(Node* shallow, Node* deep, uint dp=0); 449 void set_alignment(Node* s1, Node* s2, int align); 450 int data_size(Node* s); 451 // Extend packset by following use->def and def->use links from pack members. 
452 void extend_packlist(); 453 // Extend the packset by visiting operand definitions of nodes in pack p 454 bool follow_use_defs(Node_List* p); 455 // Extend the packset by visiting uses of nodes in pack p 456 bool follow_def_uses(Node_List* p); 457 // For extended packsets, ordinally arrange uses packset by major component 458 void order_def_uses(Node_List* p); 459 // Estimate the savings from executing s1 and s2 as a pack 460 int est_savings(Node* s1, Node* s2); 461 int adjacent_profit(Node* s1, Node* s2); 462 int pack_cost(int ct); 463 int unpack_cost(int ct); 464 // Combine packs A and B with A.last == B.first into A.first..,A.last,B.second,..B.last 465 void combine_packs(); 466 // Construct the map from nodes to packs. 467 void construct_my_pack_map(); 468 // Remove packs that are not implemented or not profitable. 469 void filter_packs(); 470 // Merge CMoveD into new vector-nodes 471 void merge_packs_to_cmovd(); 472 // Adjust the memory graph for the packed operations 473 void schedule(); 474 // Remove "current" from its current position in the memory graph and insert 475 // it after the appropriate insert points (lip or uip); 476 void remove_and_insert(MemNode *current, MemNode *prev, MemNode *lip, Node *uip, Unique_Node_List &schd_before); 477 // Within a store pack, schedule stores together by moving out the sandwiched memory ops according 478 // to dependence info; and within a load pack, move loads down to the last executed load. 479 void co_locate_pack(Node_List* p); 480 // Convert packs into vector node operations 481 void output(); 482 // Create a vector operand for the nodes in pack p for operand: in(opd_idx) 483 Node* vector_opd(Node_List* p, int opd_idx); 484 // Can code be generated for pack p? 485 bool implemented(Node_List* p); 486 // For pack p, are all operands and all uses (with in the block) vector? 487 bool profitable(Node_List* p); 488 // If a use of pack p is not a vector use, then replace the use with an extract operation. 
489 void insert_extracts(Node_List* p); 490 // Is use->in(u_idx) a vector use? 491 bool is_vector_use(Node* use, int u_idx); 492 // Construct reverse postorder list of block members 493 bool construct_bb(); 494 // Initialize per node info 495 void initialize_bb(); 496 // Insert n into block after pos 497 void bb_insert_after(Node* n, int pos); 498 // Compute max depth for expressions from beginning of block 499 void compute_max_depth(); 500 // Compute necessary vector element type for expressions 501 void compute_vector_element_type(); 502 // Are s1 and s2 in a pack pair and ordered as s1,s2? 503 bool in_packset(Node* s1, Node* s2); 504 // Is s in pack p? 505 Node_List* in_pack(Node* s, Node_List* p); 506 // Remove the pack at position pos in the packset 507 void remove_pack_at(int pos); 508 // Return the node executed first in pack p. 509 Node* executed_first(Node_List* p); 510 // Return the node executed last in pack p. 511 Node* executed_last(Node_List* p); 512 static LoadNode::ControlDependency control_dependency(Node_List* p); 513 // Alignment within a vector memory reference 514 int memory_alignment(MemNode* s, int iv_adjust); 515 // (Start, end] half-open range defining which operands are vector 516 void vector_opd_range(Node* n, uint* start, uint* end); 517 // Smallest type containing range of values 518 const Type* container_type(Node* n); 519 // Adjust pre-loop limit so that in main loop, a load/store reference 520 // to align_to_ref will be a position zero in the vector. 521 void align_initial_loop_index(MemNode* align_to_ref); 522 // Find pre loop end from main loop. Returns null if none. 523 CountedLoopEndNode* get_pre_loop_end(CountedLoopNode *cl); 524 // Is the use of d1 in u1 at the same operand position as d2 in u2? 
525 bool opnd_positions_match(Node* d1, Node* u1, Node* d2, Node* u2); 526 void init(); 527 // clean up some basic structures - used if the ideal graph was rebuilt 528 void restart(); 529 530 // print methods 531 void print_packset(); 532 void print_pack(Node_List* p); 533 void print_bb(); 534 void print_stmt(Node* s); 535 char* blank(uint depth); 536 537 void packset_sort(int n); 538 }; 539 540 541 542 //------------------------------SWPointer--------------------------- 543 // Information about an address for dependence checking and vector alignment 544 class SWPointer VALUE_OBJ_CLASS_SPEC { 545 protected: 546 MemNode* _mem; // My memory reference node 547 SuperWord* _slp; // SuperWord class 548 549 Node* _base; // NULL if unsafe nonheap reference 550 Node* _adr; // address pointer 551 jint _scale; // multiplier for iv (in bytes), 0 if no loop iv 552 jint _offset; // constant offset (in bytes) 553 Node* _invar; // invariant offset (in bytes), NULL if none 554 bool _negate_invar; // if true then use: (0 - _invar) 555 Node_Stack* _nstack; // stack used to record a swpointer trace of variants 556 bool _analyze_only; // Used in loop unrolling only for swpointer trace 557 uint _stack_idx; // Used in loop unrolling only for swpointer trace 558 559 PhaseIdealLoop* phase() { return _slp->phase(); } 560 IdealLoopTree* lpt() { return _slp->lpt(); } 561 PhiNode* iv() { return _slp->iv(); } // Induction var 562 563 bool invariant(Node* n); 564 565 // Match: k*iv + offset 566 bool scaled_iv_plus_offset(Node* n); 567 // Match: k*iv where k is a constant that's not zero 568 bool scaled_iv(Node* n); 569 // Match: offset is (k [+/- invariant]) 570 bool offset_plus_k(Node* n, bool negate = false); 571 572 public: 573 enum CMP { 574 Less = 1, 575 Greater = 2, 576 Equal = 4, 577 NotEqual = (Less | Greater), 578 NotComparable = (Less | Greater | Equal) 579 }; 580 581 SWPointer(MemNode* mem, SuperWord* slp, Node_Stack *nstack, bool analyze_only); 582 // Following is used to create a 
temporary object during 583 // the pattern match of an address expression. 584 SWPointer(SWPointer* p); 585 586 bool valid() { return _adr != NULL; } 587 bool has_iv() { return _scale != 0; } 588 589 Node* base() { return _base; } 590 Node* adr() { return _adr; } 591 MemNode* mem() { return _mem; } 592 int scale_in_bytes() { return _scale; } 593 Node* invar() { return _invar; } 594 bool negate_invar() { return _negate_invar; } 595 int offset_in_bytes() { return _offset; } 596 int memory_size() { return _mem->memory_size(); } 597 Node_Stack* node_stack() { return _nstack; } 598 599 // Comparable? 600 int cmp(SWPointer& q) { 601 if (valid() && q.valid() && 602 (_adr == q._adr || _base == _adr && q._base == q._adr) && 603 _scale == q._scale && 604 _invar == q._invar && 605 _negate_invar == q._negate_invar) { 606 bool overlap = q._offset < _offset + memory_size() && 607 _offset < q._offset + q.memory_size(); 608 return overlap ? Equal : (_offset < q._offset ? Less : Greater); 609 } else { 610 return NotComparable; 611 } 612 } 613 614 bool not_equal(SWPointer& q) { return not_equal(cmp(q)); } 615 bool equal(SWPointer& q) { return equal(cmp(q)); } 616 bool comparable(SWPointer& q) { return comparable(cmp(q)); } 617 static bool not_equal(int cmp) { return cmp <= NotEqual; } 618 static bool equal(int cmp) { return cmp == Equal; } 619 static bool comparable(int cmp) { return cmp < NotComparable; } 620 621 void print(); 622 623 #ifndef PRODUCT 624 class Tracer { 625 friend class SuperWord; 626 friend class SWPointer; 627 SuperWord* _slp; 628 static int _depth; 629 int _depth_save; 630 void print_depth(); 631 int depth() const { return _depth; } 632 void set_depth(int d) { _depth = d; } 633 void inc_depth() { _depth++;} 634 void dec_depth() { if (_depth > 0) _depth--;} 635 void store_depth() {_depth_save = _depth;} 636 void restore_depth() {_depth = _depth_save;} 637 638 class Depth { 639 friend class Tracer; 640 friend class SWPointer; 641 friend class SuperWord; 642 Depth() 
{ ++_depth; } 643 Depth(int x) { _depth = 0; } 644 ~Depth() { if (_depth > 0) --_depth;} 645 }; 646 Tracer (SuperWord* slp) : _slp(slp) {} 647 648 // tracing functions 649 void ctor_1(Node* mem); 650 void ctor_2(Node* adr); 651 void ctor_3(Node* adr, int i); 652 void ctor_4(Node* adr, int i); 653 void ctor_5(Node* adr, Node* base, int i); 654 void ctor_6(Node* mem); 655 656 void invariant_1(Node *n, Node *n_c); 657 658 void scaled_iv_plus_offset_1(Node* n); 659 void scaled_iv_plus_offset_2(Node* n); 660 void scaled_iv_plus_offset_3(Node* n); 661 void scaled_iv_plus_offset_4(Node* n); 662 void scaled_iv_plus_offset_5(Node* n); 663 void scaled_iv_plus_offset_6(Node* n); 664 void scaled_iv_plus_offset_7(Node* n); 665 void scaled_iv_plus_offset_8(Node* n); 666 667 void scaled_iv_1(Node* n); 668 void scaled_iv_2(Node* n, int scale); 669 void scaled_iv_3(Node* n, int scale); 670 void scaled_iv_4(Node* n, int scale); 671 void scaled_iv_5(Node* n, int scale); 672 void scaled_iv_6(Node* n, int scale); 673 void scaled_iv_7(Node* n); 674 void scaled_iv_8(Node* n, SWPointer* tmp); 675 void scaled_iv_9(Node* n, int _scale, int _offset, int mult); 676 void scaled_iv_10(Node* n); 677 678 void offset_plus_k_1(Node* n); 679 void offset_plus_k_2(Node* n, int _offset); 680 void offset_plus_k_3(Node* n, int _offset); 681 void offset_plus_k_4(Node* n); 682 void offset_plus_k_5(Node* n, Node* _invar); 683 void offset_plus_k_6(Node* n, Node* _invar, bool _negate_invar, int _offset); 684 void offset_plus_k_7(Node* n, Node* _invar, bool _negate_invar, int _offset); 685 void offset_plus_k_8(Node* n, Node* _invar, bool _negate_invar, int _offset); 686 void offset_plus_k_9(Node* n, Node* _invar, bool _negate_invar, int _offset); 687 void offset_plus_k_10(Node* n, Node* _invar, bool _negate_invar, int _offset); 688 void offset_plus_k_11(Node* n); 689 690 } _tracer;//TRacer; 691 #endif 692 }; 693 694 #endif // SHARE_VM_OPTO_SUPERWORD_HPP