/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_MATCHER_HPP
#define SHARE_OPTO_MATCHER_HPP

#include "libadt/vectset.hpp"
#include "memory/resourceArea.hpp"
#include "opto/node.hpp"
#include "opto/phaseX.hpp"
#include "opto/regmask.hpp"

class Compile;
class Node;
class MachNode;
class MachTypeNode;
class MachOper;

//---------------------------Matcher-------------------------------------------
class Matcher : public PhaseTransform {
  friend class VMStructs;

public:

  // State and MStack class used in xform() and find_shared() iterative methods.
  enum Node_State { Pre_Visit,      // node has to be pre-visited
                    Visit,          // visit node
                    Post_Visit,     // post-visit node
                    Alt_Post_Visit  // alternative post-visit path
  };

  class MStack: public Node_Stack {
  public:
    MStack(int size) : Node_Stack(size) { }

    void push(Node *n, Node_State ns) {
      Node_Stack::push(n, (uint)ns);
    }
    void push(Node *n, Node_State ns, Node *parent, int indx) {
      ++_inode_top;
      if ((_inode_top + 1) >= _inode_max) grow();
      _inode_top->node = parent;
      _inode_top->indx = (uint)indx;
      ++_inode_top;
      _inode_top->node = n;
      _inode_top->indx = (uint)ns;
    }
    Node *parent() {
      pop();
      return node();
    }
    Node_State state() const {
      return (Node_State)index();
    }
    void set_state(Node_State ns) {
      set_index((uint)ns);
    }
  };
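
  // A minimal sketch of the iterative traversal pattern MStack drives
  // (illustrative only; `root` and `do_post_visit` are hypothetical
  // placeholders, and a real walk also consults _visited to avoid
  // re-processing shared nodes):
  //
  //   MStack mstack(C->live_nodes() * 2);
  //   mstack.push(root, Visit, NULL, -1);          // root has no parent
  //   while (mstack.is_nonempty()) {
  //     Node*      n  = mstack.node();
  //     Node_State ns = mstack.state();
  //     if (ns == Visit) {
  //       mstack.set_state(Post_Visit);            // revisit n after its inputs
  //       for (uint i = 0; i < n->req(); i++) {
  //         if (n->in(i) != NULL) {
  //           mstack.push(n->in(i), Visit, n, i);  // pushes <n,i>, then <in,Visit>
  //         }
  //       }
  //     } else {                                   // Post_Visit
  //       do_post_visit(n);
  //       Node* p = mstack.parent();               // pops n, exposes <parent,indx>
  //       if (p != NULL) { /* use p and mstack.index() */ }
  //       mstack.pop();                            // pop the <parent,indx> entry
  //     }
  //   }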

private:
  // Private arena of State objects
  ResourceArea _states_arena;

  VectorSet _visited;   // Visit bits

  // Used to control the Label pass
  VectorSet _shared;    // Shared Ideal Node
  VectorSet _dontcare;  // Nothing the matcher cares about

  // Private methods which perform the actual matching and reduction
  // Walks the label tree, generating machine nodes
  MachNode *ReduceInst( State *s, int rule, Node *&mem);
  void ReduceInst_Chain_Rule( State *s, int rule, Node *&mem, MachNode *mach);
  uint ReduceInst_Interior(State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds);
  void ReduceOper( State *s, int newrule, Node *&mem, MachNode *mach );

  // If this node already matched using "rule", return the MachNode for it.
  MachNode* find_shared_node(Node* n, uint rule);

  // Convert a dense opcode number to an expanded rule number
  const int *_reduceOp;
  const int *_leftOp;
  const int *_rightOp;

  // Map dense opcode number to info on whether the rule swallowed a constant.
  const bool *_swallowed;

  // Map dense rule number to determine if this is an instruction chain rule
  const uint _begin_inst_chain_rule;
  const uint _end_inst_chain_rule;

  // We want to clone constants and possible CmpI-variants.
  // If we do not clone CmpI, then we can have many instances of
  // condition codes alive at once.  This is OK on some chips and
  // bad on others.  Hence the machine-dependent table lookup.
  const char *_must_clone;

  // Find shared Nodes, or Nodes that otherwise are Matcher roots
  void find_shared( Node *n );
  bool find_shared_visit(MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx);
  void find_shared_post_visit(Node* n, uint opcode);

  bool is_vshift_con_pattern(Node *n, Node *m);

  // Debug and profile information for nodes in old space:
  GrowableArray<Node_Notes*>* _old_node_note_array;

  // Node labeling iterator for instruction selection
  Node* Label_Root(const Node* n, State* svec, Node* control, Node*& mem);

  Node *transform( Node *dummy );

  Node_List _projection_list;  // For Machine nodes killing many values

  Node_Array _shared_nodes;

  debug_only(Node_Array _old2new_map;)  // Map roots of ideal-trees to machine-roots
  debug_only(Node_Array _new2old_map;)  // Maps machine nodes back to ideal

  // Accessors for the inherited field PhaseTransform::_nodes:
  void grow_new_node_array(uint idx_limit) {
    _nodes.map(idx_limit-1, NULL);
  }
  bool has_new_node(const Node* n) const {
    return _nodes.at(n->_idx) != NULL;
  }
  Node* new_node(const Node* n) const {
    assert(has_new_node(n), "set before get");
    return _nodes.at(n->_idx);
  }
  void set_new_node(const Node* n, Node *nn) {
    assert(!has_new_node(n), "set only once");
    _nodes.map(n->_idx, nn);
  }

#ifdef ASSERT
  // Make sure only new nodes are reachable from this node
  void verify_new_nodes_only(Node* root);

  Node* _mem_node;  // Ideal memory node consumed by mach node
#endif

  // Mach node for ConP #NULL
  MachNode* _mach_null;

  void handle_precedence_edges(Node* n, MachNode *mach);

public:
  int LabelRootDepth;
  // Convert ideal machine register to a register mask for spill-loads
  static const RegMask *idealreg2regmask[];
  RegMask *idealreg2spillmask  [_last_machine_leaf];
  RegMask *idealreg2debugmask  [_last_machine_leaf];
  RegMask *idealreg2mhdebugmask[_last_machine_leaf];
  void init_spill_mask( Node *ret );
  // Convert machine register number to register mask
  static uint mreg2regmask_max;
  static RegMask mreg2regmask[];
  static RegMask STACK_ONLY_mask;

  MachNode* mach_null() const { return _mach_null; }

  bool    is_shared( Node *n ) { return _shared.test(n->_idx) != 0; }
  void   set_shared( Node *n ) {        _shared.set(n->_idx); }
  bool   is_visited( Node *n ) { return _visited.test(n->_idx) != 0; }
  void  set_visited( Node *n ) {        _visited.set(n->_idx); }
  bool  is_dontcare( Node *n ) { return _dontcare.test(n->_idx) != 0; }
  void set_dontcare( Node *n ) {        _dontcare.set(n->_idx); }
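
  // A minimal sketch (illustrative, not the actual find_shared() logic) of
  // how the bits above interact during the sharing pass: a node reached
  // along a second path is marked shared, forcing it to become its own
  // match-tree root:
  //
  //   if (is_visited(m)) {
  //     set_shared(m);    // reached twice => matches into a register
  //   } else {
  //     set_visited(m);   // first time we see m
  //   }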

  // Mode bit to tell DFA and expand rules whether we are running after
  // (or during) register selection.  Usually, the matcher runs before,
  // but it will also get called to generate post-allocation spill code.
  // In this situation, it is a deadly error to attempt to allocate more
  // temporary registers.
  bool _allocation_started;

  // Machine register names
  static const char *regName[];
  // Machine register encodings
  static const unsigned char _regEncode[];
  // Machine Node names
  const char **_ruleName;
  // Rules that are cheaper to rematerialize than to spill
  static const uint _begin_rematerialize;
  static const uint _end_rematerialize;

  // An array of chars, from 0 to _last_Mach_Reg.
  // No Save       = 'N' (for register windows)
  // Save on Entry = 'E'
  // Save on Call  = 'C'
  // Always Save   = 'A' (same as SOE + SOC)
  const char *_register_save_policy;
  const char *_c_reg_save_policy;
  // Convert a machine register to a machine register type, so as to
  // properly match spill code.
  const int *_register_save_type;
  // Maps from machine register to boolean; true if machine register can
  // be holding a call argument in some signature.
  static bool can_be_java_arg( int reg );
  // Maps from machine register to boolean; true if machine register holds
  // a spillable argument.
  static bool is_spillable_arg( int reg );

  // List of IfFalse or IfTrue Nodes that indicate a taken null test.
  // List is valid in the post-matching space.
  Node_List _null_check_tests;
  void collect_null_checks( Node *proj, Node *orig_proj );
  void validate_null_checks( );

  Matcher();

  // Get a projection node at position pos
  Node* get_projection(uint pos) {
    return _projection_list[pos];
  }

  // Push a projection node onto the projection list
  void push_projection(Node* node) {
    _projection_list.push(node);
  }

  Node* pop_projection() {
    return _projection_list.pop();
  }

  // Number of nodes in the projection list
  uint number_of_projections() const {
    return _projection_list.size();
  }

  // Select instructions for entire method
  void match();

  // Helper for match
  OptoReg::Name warp_incoming_stk_arg( VMReg reg );

  // Transform, then walk.  Does implicit DCE while walking.
  // Name changed from "transform" to avoid it being virtual.
  Node *xform( Node *old_space_node, int Nodes );

  // Match a single Ideal Node - turn it into a 1-Node tree; Label & Reduce.
  MachNode *match_tree( const Node *n );
  MachNode *match_sfpt( SafePointNode *sfpt );
  // Helper for match_sfpt
  OptoReg::Name warp_outgoing_stk_arg( VMReg reg, OptoReg::Name begin_out_arg_area, OptoReg::Name &out_arg_limit_per_call );

  // Initialize first stack mask and related masks.
  void init_first_stack_mask();

  // If we should save-on-entry this register
  bool is_save_on_entry( int reg );

  // Fixup the save-on-entry registers
  void Fixup_Save_On_Entry( );
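
  // A minimal sketch (hypothetical, not the actual Fixup_Save_On_Entry()
  // logic) of how the save-policy chars above are typically consumed for a
  // register number `reg`:
  //
  //   switch (_register_save_policy[reg]) {
  //     case 'C': /* save-on-call: caller's responsibility */  break;
  //     case 'E': /* save-on-entry: preserve in the prolog */  break;
  //     case 'N': /* no-save: e.g. register windows */         break;
  //     case 'A': /* always save: SOE + SOC */                 break;
  //   }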

  // --- Frame handling ---

  // Register number of the stack slot corresponding to the incoming SP.
  // Per the Big Picture in the AD file, it is:
  //   SharedInfo::stack0 + locks + in_preserve_stack_slots + pad2.
  OptoReg::Name _old_SP;

  // Register number of the stack slot corresponding to the highest incoming
  // argument on the stack.  Per the Big Picture in the AD file, it is:
  //   _old_SP + out_preserve_stack_slots + incoming argument size.
  OptoReg::Name _in_arg_limit;

  // Register number of the stack slot corresponding to the new SP.
  // Per the Big Picture in the AD file, it is:
  //   _in_arg_limit + pad0
  OptoReg::Name _new_SP;

  // Register number of the stack slot corresponding to the highest outgoing
  // argument on the stack.  Per the Big Picture in the AD file, it is:
  //   _new_SP + max outgoing arguments of all calls
  OptoReg::Name _out_arg_limit;

  OptoRegPair *_parm_regs;            // Array of machine registers per argument
  RegMask *_calling_convention_mask;  // Array of RegMasks per argument

  // Does matcher have a match rule for this ideal node?
  static const bool has_match_rule(int opcode);
  static const bool _hasMatchRule[_last_opcode];

  // Does matcher have a match rule for this ideal node and is the
  // predicate (if there is one) true?
  // NOTE: If this function is used more commonly in the future, ADLC
  // should generate this one.
  static const bool match_rule_supported(int opcode);

  // Identify extra cases that we might want to provide match rules for,
  // e.g. Op_ vector nodes and other intrinsics, while guarding with vlen.
  static const bool match_rule_supported_vector(int opcode, int vlen, BasicType bt);

  // Some microarchitectures have mask registers used on vectors
  static const bool has_predicated_vectors(void);

  // Some uarchs have different-sized float register resources
  static const int float_pressure(int default_pressure_threshold);

  // Used to determine if we have fast l2f conversion
  // USII has it, USIII doesn't
  static const bool convL2FSupported(void);

  // Vector width in bytes
  static const int vector_width_in_bytes(BasicType bt);

  // Limits on vector size (number of elements).
  static const int max_vector_size(const BasicType bt);
  static const int min_vector_size(const BasicType bt);
  static const bool vector_size_supported(const BasicType bt, int size) {
    return (Matcher::max_vector_size(bt) >= size &&
            Matcher::min_vector_size(bt) <= size);
  }

  // Vector ideal reg
  static const uint vector_ideal_reg(int len);

  // Does the CPU support vector variable shift instructions?
  static bool supports_vector_variable_shifts(void);

  // CPU supports misaligned vector stores/loads.
  static const bool misaligned_vectors_ok();

  // Should original key array reference be passed to AES stubs
  static const bool pass_original_key_for_aes();

  // Used to determine a "low complexity" 64-bit constant.  (Zero is simple.)
  // The standard of comparison is one (StoreL ConL) vs. two (StoreI ConI).
  // Depends on the details of 64-bit constant generation on the CPU.
  static const bool isSimpleConstant64(jlong con);

  // These calls are all generated by the ADLC

  // TRUE - grows up, FALSE - grows down (Intel)
  virtual bool stack_direction() const;

  // Java-Java calling convention
  // (what you use when Java calls Java)

  // Alignment of stack in bytes, standard Intel word alignment is 4.
  // Sparc probably wants at least double-word (8).
  static uint stack_alignment_in_bytes();
  // Alignment of stack, measured in stack slots.
  // The size of stack slots is defined by VMRegImpl::stack_slot_size.
  static uint stack_alignment_in_slots() {
    return stack_alignment_in_bytes() / (VMRegImpl::stack_slot_size);
  }
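
  // Illustrative arithmetic (values are hypothetical): with 16-byte stack
  // alignment and VMRegImpl::stack_slot_size == 4,
  // stack_alignment_in_slots() returns 16 / 4 == 4 slots.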

  // Array mapping arguments to registers.  Argument 0 is usually the 'this'
  // pointer.  Registers can include stack-slots and regular registers.
  static void calling_convention( BasicType *, VMRegPair *, uint len, bool is_outgoing );

  // Convert a sig into a calling convention register layout
  // and find interesting things about it.
  static OptoReg::Name find_receiver( bool is_outgoing );
  // Return address register.  On Intel it is a stack-slot.  On PowerPC
  // it is the Link register.  On Sparc it is r31?
  virtual OptoReg::Name return_addr() const;
  RegMask _return_addr_mask;
  // Return value register.  On Intel it is EAX.  On Sparc i0/o0.
  static OptoRegPair   return_value(uint ideal_reg, bool is_outgoing);
  static OptoRegPair c_return_value(uint ideal_reg, bool is_outgoing);
  RegMask _return_value_mask;
  // Inline Cache Register
  static OptoReg::Name inline_cache_reg();
  static int           inline_cache_reg_encode();

  // Register for DIVI projection of divmodI
  static RegMask divI_proj_mask();
  // Register for MODI projection of divmodI
  static RegMask modI_proj_mask();

  // Register for DIVL projection of divmodL
  static RegMask divL_proj_mask();
  // Register for MODL projection of divmodL
  static RegMask modL_proj_mask();

  // Use the hardware DIV instruction when it is faster than code that uses
  // multiplication for division by a constant.
  static bool use_asm_for_ldiv_by_con( jlong divisor );

  static const RegMask method_handle_invoke_SP_save_mask();

  // Java-Interpreter calling convention
  // (what you use when calling between compiled-Java and Interpreted-Java)

  // Number of callee-save + always-save registers
  // Ignores frame pointer and "special" registers
  static int number_of_saved_registers();

  // The Method-klass-holder may be passed in the inline_cache_reg
  // and then expanded into the inline_cache_reg and a method_ptr register

  static OptoReg::Name  interpreter_method_oop_reg();
  static int            interpreter_method_oop_reg_encode();

  static OptoReg::Name  compiler_method_oop_reg();
  static const RegMask &compiler_method_oop_reg_mask();
  static int            compiler_method_oop_reg_encode();

  // Interpreter's Frame Pointer Register
  static OptoReg::Name interpreter_frame_pointer_reg();

  // Java-Native calling convention
  // (what you use when intercalling between Java and C++ code)

  // Array mapping arguments to registers.  Argument 0 is usually the 'this'
  // pointer.  Registers can include stack-slots and regular registers.
  static void c_calling_convention( BasicType*, VMRegPair *, uint );
  // Frame pointer.  The frame pointer is kept at the base of the stack
  // and so is probably the stack pointer for most machines.  On Intel
  // it is ESP.  On the PowerPC it is R1.  On Sparc it is SP.
  OptoReg::Name c_frame_pointer() const;
  static RegMask c_frame_ptr_mask;

  // !!!!! Special stuff for building ScopeDescs
  virtual int regnum_to_fpu_offset(int regnum);

  // Is this branch offset small enough to be addressed by a short branch?
  bool is_short_branch_offset(int rule, int br_size, int offset);

  // Optional scaling for the parameter to the ClearArray/CopyArray node.
  static const bool init_array_count_is_in_bytes;

  // Some hardware needs two CMOVs for longs.
  static const int long_cmove_cost();

  // Some hardware has expensive CMOV for float and double.
  static const int float_cmove_cost();
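
  // Illustrative sketch (hypothetical values): these costs feed the CMove
  // heuristics, so a platform with cheap integer but microcoded FP
  // conditional moves might define
  //
  //   static const int long_cmove_cost()  { return 0; }
  //   static const int float_cmove_cost() { return ConditionalMoveLimit; }
  //
  // to effectively discourage float/double CMoves.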

  // Should the input 'm' of node 'n' be cloned during matching?
  // Reports back whether the node was cloned or not.
  bool clone_node(Node* n, Node* m, Matcher::MStack& mstack);
  bool pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack);

  // Should the Matcher clone shifts on addressing modes, expecting them to
  // be subsumed into complex addressing expressions or compute them into
  // registers?  True for Intel but false for most RISCs.
  bool pd_clone_address_expressions(AddPNode* m, MStack& mstack, VectorSet& address_visited);
  // Clone base + offset address expression
  bool clone_base_plus_offset_address(AddPNode* m, MStack& mstack, VectorSet& address_visited);

  static bool narrow_oop_use_complex_address();
  static bool narrow_klass_use_complex_address();

  static bool const_oop_prefer_decode();
  static bool const_klass_prefer_decode();

  // Generate an implicit null check for narrow oops if it can fold
  // into the address expression (x64):
  //
  //   [R12 + narrow_oop_reg<<3 + offset]  // fold into address expression
  //   NullCheck narrow_oop_reg
  //
  // When narrow oops can't fold into the address expression (Sparc) and the
  // base is not null, use decode_not_null and a normal implicit null check.
  // Note: the decode_not_null node can be used here since it is referenced
  // only on the non-null path, but it requires special handling; see
  // collect_null_checks():
  //
  //   decode_not_null narrow_oop_reg, oop_reg  // 'shift' and 'add base'
  //   [oop_reg + offset]
  //   NullCheck oop_reg
  //
  // With a zero base, and when narrow oops cannot fold into the address
  // expression, use a normal implicit null check, since only a shift is
  // needed to decode the narrow oop:
  //
  //   decode narrow_oop_reg, oop_reg  // only 'shift'
  //   [oop_reg + offset]
  //   NullCheck oop_reg
  //
  static bool gen_narrow_oop_implicit_null_checks();

  // Is it better to copy float constants, or load them directly from memory?
  // Intel can load a float constant from a direct address, requiring no
  // extra registers.  Most RISCs will have to materialize an address into a
  // register first, so they may as well materialize the constant immediately.
  static const bool rematerialize_float_constants;

  // If CPU can load and store mis-aligned doubles directly then no fixup is
  // needed.  Else we split the double into 2 integer pieces and move it
  // piece-by-piece.  Only happens when passing doubles into C code or when
  // calling i2c adapters as the Java calling convention forces doubles to be
  // aligned.
  static const bool misaligned_doubles_ok;

  // Does the CPU require postalloc expand (see block.cpp for description of
  // postalloc expand)?
  static const bool require_postalloc_expand;
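
  // Illustrative example for pd_clone_address_expressions() above
  // (hypothetical ideal subgraph): on x86 a shift feeding an AddP gets
  // cloned so that matching can fold it into a single addressing mode,
  //
  //   AddP(base, LShiftL(index, ConI(3)))   ==>   [base + index*8]
  //
  // whereas most RISCs would compute the scaled index into a register first.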

  // Does the platform support generic vector operands?
  // Requires cleanup after selection phase.
  static const bool supports_generic_vector_operands;

private:
  void do_postselect_cleanup();

  void specialize_generic_vector_operands();
  void specialize_mach_node(MachNode* m);
  void specialize_temp_node(MachTempNode* tmp, MachNode* use, uint idx);
  MachOper* specialize_vector_operand(MachNode* m, uint opnd_idx);

  static MachOper* pd_specialize_generic_vector_operand(MachOper* generic_opnd, uint ideal_reg, bool is_temp);
  static bool is_generic_reg2reg_move(MachNode* m);
  static bool is_generic_vector(MachOper* opnd);

  const RegMask* regmask_for_ideal_register(uint ideal_reg, Node* ret);

  // Graph verification code
  DEBUG_ONLY( bool verify_after_postselect_cleanup(); )

public:
  // Perform a platform-dependent implicit null fixup.  This is needed
  // on windows95 to take care of some unusual register constraints.
  void pd_implicit_null_fixup(MachNode *load, uint idx);

  // Advertise here if the CPU requires explicit rounding operations to implement strictfp mode.
  static const bool strict_fp_requires_explicit_rounding;

  // Are floats converted to doubles when stored to the stack during
  // deoptimization?
  static bool float_in_double();
  // Do ints take an entire long register or just half?
  static const bool int_in_long;

  // Do the processor's shift instructions only use the low 5/6 bits
  // of the count for 32/64 bit ints?  If not we need to do the masking
  // ourselves.
  static const bool need_masked_shift_count;

  // Whether code generation needs accurate ConvI2L types.
  static const bool convi2l_type_required;

  // This routine is run whenever a graph fails to match.
  // If it returns, the compiler should bail out to the interpreter
  // without error.  In non-product mode, SoftMatchFailure is false to
  // detect non-canonical graphs.  Print a message and exit.
  static void soft_match_failure() {
    if( SoftMatchFailure ) return;
    else { fatal("SoftMatchFailure is not allowed except in product"); }
  }

  // Check for a following volatile memory barrier without an
  // intervening load; in that case we don't need a barrier here.  We
  // retain the Node to act as a compiler ordering barrier.
  static bool post_store_load_barrier(const Node* mb);

  // Does n lead to an uncommon trap that can cause deoptimization?
  static bool branches_to_uncommon_trap(const Node *n);

#ifdef ASSERT
  void dump_old2new_map();  // machine-independent to machine-dependent

  Node* find_old_node(Node* new_node) {
    return _new2old_map[new_node->_idx];
  }
#endif
};

#endif // SHARE_OPTO_MATCHER_HPP