/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_MATCHER_HPP
#define SHARE_VM_OPTO_MATCHER_HPP

#include "libadt/vectset.hpp"
#include "memory/resourceArea.hpp"
#include "opto/node.hpp"
#include "opto/phaseX.hpp"
#include "opto/regmask.hpp"

class Compile;
class Node;
class MachNode;
class MachTypeNode;
class MachOper;

//---------------------------Matcher-------------------------------------------
class Matcher : public PhaseTransform {
  friend class VMStructs;
  // Private arena of State objects
  ResourceArea _states_arena;

  VectorSet _visited;           // Visit bits

  // Used to control the Label pass
  VectorSet _shared;            // Shared Ideal Node
  VectorSet _dontcare;          // Nothing the matcher cares about

  // Private methods which perform the actual matching and reduction
  // Walks the label tree, generating machine nodes
  MachNode *ReduceInst( State *s, int rule, Node *&mem);
  void ReduceInst_Chain_Rule( State *s, int rule, Node *&mem, MachNode *mach);
  uint ReduceInst_Interior(State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds);
  void ReduceOper( State *s, int newrule, Node *&mem, MachNode *mach );

  // If this node already matched using "rule", return the MachNode for it.
  MachNode* find_shared_node(Node* n, uint rule);

  // Convert a dense opcode number to an expanded rule number
  const int *_reduceOp;
  const int *_leftOp;
  const int *_rightOp;

  // Map dense opcode number to info on when a rule swallows a constant.
  const bool *_swallowed;

  // Map dense rule number to determine if this is an instruction chain rule
  const uint _begin_inst_chain_rule;
  const uint _end_inst_chain_rule;
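  // A minimal sketch of the matching control flow (illustrative only; the
  // real driver is match_tree() in matcher.cpp, and 'rule' here is a
  // hypothetical name for the cheapest valid rule recorded in the State):
  //
  //   State s;                            // DFA state for the subtree root
  //   Label_Root(n, &s, n->in(0), mem);   // bottom-up: label with cheapest rules
  //   MachNode* m = ReduceInst(&s, rule, mem); // top-down: emit machine nodes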
  // We want to clone constants and possible CmpI-variants.
  // If we do not clone CmpI, then we can have many instances of
  // condition codes alive at once.  This is OK on some chips and
  // bad on others.  Hence the machine-dependent table lookup.
  const char *_must_clone;

  // Find shared Nodes, or Nodes that otherwise are Matcher roots
  void find_shared( Node *n );
#ifdef X86
  bool is_bmi_pattern(Node *n, Node *m);
#endif

  // Debug and profile information for nodes in old space:
  GrowableArray<Node_Notes*>* _old_node_note_array;

  // Node labeling iterator for instruction selection
  Node *Label_Root( const Node *n, State *svec, Node *control, const Node *mem );

  Node *transform( Node *dummy );

  Node_List _projection_list;           // For Machine nodes killing many values

  Node_Array _shared_nodes;

  debug_only(Node_Array _old2new_map;)  // Map roots of ideal-trees to machine-roots
  debug_only(Node_Array _new2old_map;)  // Maps machine nodes back to ideal

  // Accessors for the inherited field PhaseTransform::_nodes:
  void grow_new_node_array(uint idx_limit) {
    _nodes.map(idx_limit-1, NULL);
  }
  bool has_new_node(const Node* n) const {
    return _nodes.at(n->_idx) != NULL;
  }
  Node* new_node(const Node* n) const {
    assert(has_new_node(n), "set before get");
    return _nodes.at(n->_idx);
  }
  void set_new_node(const Node* n, Node *nn) {
    assert(!has_new_node(n), "set only once");
    _nodes.map(n->_idx, nn);
  }

#ifdef ASSERT
  // Make sure only new nodes are reachable from this node
  void verify_new_nodes_only(Node* root);

  Node* _mem_node;   // Ideal memory node consumed by mach node
#endif

  // Mach node for ConP #NULL
  MachNode* _mach_null;

  void handle_precedence_edges(Node* n, MachNode *mach);

public:
  int LabelRootDepth;
  // Convert ideal machine register to a register mask for spill-loads
  static const RegMask *idealreg2regmask[];
  RegMask *idealreg2spillmask  [_last_machine_leaf];
  RegMask *idealreg2debugmask  [_last_machine_leaf];
  RegMask *idealreg2mhdebugmask[_last_machine_leaf];
  void init_spill_mask( Node *ret );
  // Convert machine register number to register mask
  static uint mreg2regmask_max;
  static RegMask mreg2regmask[];
  static RegMask STACK_ONLY_mask;

  MachNode* mach_null() const { return _mach_null; }

  bool    is_shared( Node *n ) { return _shared.test(n->_idx) != 0; }
  void   set_shared( Node *n ) { _shared.set(n->_idx); }
  bool   is_visited( Node *n ) { return _visited.test(n->_idx) != 0; }
  void  set_visited( Node *n ) { _visited.set(n->_idx); }
  bool  is_dontcare( Node *n ) { return _dontcare.test(n->_idx) != 0; }
  void set_dontcare( Node *n ) { _dontcare.set(n->_idx); }

  // Mode bit to tell DFA and expand rules whether we are running after
  // (or during) register selection.  Usually, the matcher runs before,
  // but it will also get called to generate post-allocation spill code.
  // In this situation, it is a deadly error to attempt to allocate more
  // temporary registers.
  bool _allocation_started;

  // Machine register names
  static const char *regName[];
  // Machine register encodings
  static const unsigned char _regEncode[];
  // Machine Node names
  const char **_ruleName;
  // Rules that are cheaper to rematerialize than to spill
  static const uint _begin_rematerialize;
  static const uint _end_rematerialize;
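  // Illustrative use of the range above (a sketch, not from the original
  // source): a rule counts as cheap to rematerialize iff it falls in the
  // half-open interval [_begin_rematerialize, _end_rematerialize):
  //
  //   bool cheap_to_remat = (rule >= _begin_rematerialize &&
  //                          rule <  _end_rematerialize);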
  // An array of chars, from 0 to _last_Mach_Reg.
  // No Save       = 'N' (for register windows)
  // Save on Entry = 'E'
  // Save on Call  = 'C'
  // Always Save   = 'A' (same as SOE + SOC)
  const char *_register_save_policy;
  const char *_c_reg_save_policy;
  // Convert a machine register to a machine register type, so as to
  // properly match spill code.
  const int *_register_save_type;
  // Maps from machine register to boolean; true if machine register can
  // hold a call argument in some signature.
  static bool can_be_java_arg( int reg );
  // Maps from machine register to boolean; true if machine register holds
  // a spillable argument.
  static bool is_spillable_arg( int reg );

  // List of IfFalse or IfTrue Nodes that indicate a taken null test.
  // List is valid in the post-matching space.
  Node_List _null_check_tests;
  void collect_null_checks( Node *proj, Node *orig_proj );
  void validate_null_checks( );

  Matcher();

  // Get a projection node at position pos
  Node* get_projection(uint pos) {
    return _projection_list[pos];
  }

  // Push a projection node onto the projection list
  void push_projection(Node* node) {
    _projection_list.push(node);
  }

  Node* pop_projection() {
    return _projection_list.pop();
  }

  // Number of nodes in the projection list
  uint number_of_projections() const {
    return _projection_list.size();
  }

  // Select instructions for entire method
  void match();

  // Helper for match
  OptoReg::Name warp_incoming_stk_arg( VMReg reg );

  // Transform, then walk.  Does implicit DCE while walking.
  // Name changed from "transform" to avoid it being virtual.
  Node *xform( Node *old_space_node, int Nodes );

  // Match a single Ideal Node - turn it into a 1-Node tree; Label & Reduce.
  MachNode *match_tree( const Node *n );
  MachNode *match_sfpt( SafePointNode *sfpt );
  // Helper for match_sfpt
  OptoReg::Name warp_outgoing_stk_arg( VMReg reg, OptoReg::Name begin_out_arg_area, OptoReg::Name &out_arg_limit_per_call );

  // Initialize first stack mask and related masks.
  void init_first_stack_mask();

  // If we should save-on-entry this register
  bool is_save_on_entry( int reg );

  // Fixup the save-on-entry registers
  void Fixup_Save_On_Entry( );

  // --- Frame handling ---

  // Register number of the stack slot corresponding to the incoming SP.
  // Per the Big Picture in the AD file, it is:
  //   SharedInfo::stack0 + locks + in_preserve_stack_slots + pad2.
  OptoReg::Name _old_SP;

  // Register number of the stack slot corresponding to the highest incoming
  // argument on the stack.  Per the Big Picture in the AD file, it is:
  //   _old_SP + out_preserve_stack_slots + incoming argument size.
  OptoReg::Name _in_arg_limit;

  // Register number of the stack slot corresponding to the new SP.
  // Per the Big Picture in the AD file, it is:
  //   _in_arg_limit + pad0
  OptoReg::Name _new_SP;

  // Register number of the stack slot corresponding to the highest outgoing
  // argument on the stack.  Per the Big Picture in the AD file, it is:
  //   _new_SP + max outgoing arguments of all calls
  OptoReg::Name _out_arg_limit;

  OptoRegPair *_parm_regs;            // Array of machine registers per argument
  RegMask *_calling_convention_mask;  // Array of RegMasks per argument
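  // Summary of the four frame markers declared above, restating their
  // per-field comments as one layout, from low to high stack-slot numbers:
  //
  //   _old_SP        = SharedInfo::stack0 + locks + in_preserve_stack_slots + pad2
  //   _in_arg_limit  = _old_SP + out_preserve_stack_slots + incoming argument size
  //   _new_SP        = _in_arg_limit + pad0
  //   _out_arg_limit = _new_SP + max outgoing arguments of all calls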
  // Does matcher have a match rule for this ideal node?
  static const bool has_match_rule(int opcode);
  static const bool _hasMatchRule[_last_opcode];

  // Does matcher have a match rule for this ideal node and is the
  // predicate (if there is one) true?
  // NOTE: If this function is used more commonly in the future, ADLC
  // should generate this one.
  static const bool match_rule_supported(int opcode);

  // Used to determine if we have fast l2f conversion
  // USII has it, USIII doesn't
  static const bool convL2FSupported(void);

  // Vector width in bytes
  static const int vector_width_in_bytes(BasicType bt);

  // Limits on vector size (number of elements).
  static const int max_vector_size(const BasicType bt);
  static const int min_vector_size(const BasicType bt);
  static const bool vector_size_supported(const BasicType bt, int size) {
    return (Matcher::max_vector_size(bt) >= size &&
            Matcher::min_vector_size(bt) <= size);
  }

  // Vector ideal reg
  static const uint vector_ideal_reg(int len);
  static const uint vector_shift_count_ideal_reg(int len);

  // CPU supports misaligned vector store/load.
  static const bool misaligned_vectors_ok();
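  // Illustrative usage (a sketch, not from the original source): platform-
  // independent vectorization code can gate its transforms on these queries:
  //
  //   if (Matcher::vector_size_supported(T_FLOAT, 4)) {
  //     // a 4 x float (128-bit) vector can be matched on this CPU
  //   }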
  // Should the original key array reference be passed to the AES stubs?
  static const bool pass_original_key_for_aes();

  // Used to determine a "low complexity" 64-bit constant.  (Zero is simple.)
  // The standard of comparison is one (StoreL ConL) vs. two (StoreI ConI).
  // Depends on the details of 64-bit constant generation on the CPU.
  static const bool isSimpleConstant64(jlong con);

  // These calls are all generated by the ADLC

  // TRUE - grows up, FALSE - grows down (Intel)
  virtual bool stack_direction() const;

  // Java-Java calling convention
  // (what you use when Java calls Java)

  // Alignment of stack in bytes, standard Intel word alignment is 4.
  // Sparc probably wants at least double-word (8).
  static uint stack_alignment_in_bytes();
  // Alignment of stack, measured in stack slots.
  // The size of stack slots is defined by VMRegImpl::stack_slot_size.
  static uint stack_alignment_in_slots() {
    return stack_alignment_in_bytes() / (VMRegImpl::stack_slot_size);
  }

  // Array mapping arguments to registers.  Argument 0 is usually the 'this'
  // pointer.  Registers can include stack-slots and regular registers.
  static void calling_convention( BasicType *, VMRegPair *, uint len, bool is_outgoing );

  // Convert a sig into a calling convention register layout
  // and find interesting things about it.
  static OptoReg::Name find_receiver( bool is_outgoing );
  // Return address register.  On Intel it is a stack-slot.  On PowerPC
  // it is the Link register.  On Sparc it is r31?
  virtual OptoReg::Name return_addr() const;
  RegMask _return_addr_mask;
  // Return value register.  On Intel it is EAX.  On Sparc i0/o0.
  static OptoRegPair   return_value(uint ideal_reg, bool is_outgoing);
  static OptoRegPair c_return_value(uint ideal_reg, bool is_outgoing);
  RegMask _return_value_mask;
  // Inline Cache Register
  static OptoReg::Name inline_cache_reg();
  static int           inline_cache_reg_encode();

  // Register for DIVI projection of divmodI
  static RegMask divI_proj_mask();
  // Register for MODI projection of divmodI
  static RegMask modI_proj_mask();

  // Register for DIVL projection of divmodL
  static RegMask divL_proj_mask();
  // Register for MODL projection of divmodL
  static RegMask modL_proj_mask();

  // Use the hardware DIV instruction when it is faster than
  // code which uses a multiply for division by a constant.
  static bool use_asm_for_ldiv_by_con( jlong divisor );
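  // Illustrative decision (a sketch, not from the original source): when
  // lowering a long division by a compile-time constant, the platform can
  // veto the multiply-based strength reduction in favor of a hardware DIV:
  //
  //   if (Matcher::use_asm_for_ldiv_by_con(divisor)) {
  //     // emit the hardware divide
  //   } else {
  //     // emit the magic-constant multiply/shift sequence
  //   }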
  static const RegMask method_handle_invoke_SP_save_mask();

  // Java-Interpreter calling convention
  // (what you use when calling between compiled-Java and Interpreted-Java)

  // Number of callee-save + always-save registers
  // Ignores frame pointer and "special" registers
  static int number_of_saved_registers();

  // The Method-klass-holder may be passed in the inline_cache_reg
  // and then expanded into the inline_cache_reg and a method_oop register

  static OptoReg::Name  interpreter_method_oop_reg();
  static int            interpreter_method_oop_reg_encode();

  static OptoReg::Name  compiler_method_oop_reg();
  static const RegMask &compiler_method_oop_reg_mask();
  static int            compiler_method_oop_reg_encode();

  // Interpreter's Frame Pointer Register
  static OptoReg::Name interpreter_frame_pointer_reg();

  // Java-Native calling convention
  // (what you use when intercalling between Java and C++ code)

  // Array mapping arguments to registers.  Argument 0 is usually the 'this'
  // pointer.  Registers can include stack-slots and regular registers.
  static void c_calling_convention( BasicType*, VMRegPair *, uint );
  // Frame pointer.  The frame pointer is kept at the base of the stack
  // and so is probably the stack pointer for most machines.  On Intel
  // it is ESP.  On the PowerPC it is R1.  On Sparc it is SP.
  OptoReg::Name  c_frame_pointer() const;
  static RegMask c_frame_ptr_mask;

  // !!!!! Special stuff for building ScopeDescs
  virtual int regnum_to_fpu_offset(int regnum);

  // Is this branch offset small enough to be addressed by a short branch?
  bool is_short_branch_offset(int rule, int br_size, int offset);

  // Optional scaling for the parameter to the ClearArray/CopyArray node.
  static const bool init_array_count_is_in_bytes;

  // Threshold small size (in bytes) for a ClearArray/CopyArray node.
  // Anything this size or smaller may get converted to discrete scalar stores.
  static const int init_array_short_size;

  // Some hardware needs 2 CMOVs for longs.
  static const int long_cmove_cost();

  // Some hardware has expensive CMOV for float and double.
  static const int float_cmove_cost();

  // Should the Matcher clone shifts on addressing modes, expecting them to
  // be subsumed into complex addressing expressions or compute them into
  // registers?  True for Intel but false for most RISCs.
  static const bool clone_shift_expressions;

  static bool narrow_oop_use_complex_address();
  static bool narrow_klass_use_complex_address();

  // Generate an implicit null check for narrow oops if it can fold
  // into the address expression (x64):
  //
  //   [R12 + narrow_oop_reg<<3 + offset] // fold into address expression
  //   NullCheck narrow_oop_reg
  //
  // When narrow oops can't fold into the address expression (Sparc) and
  // the base is not null, use decode_not_null and a normal implicit null
  // check.  Note that a decode_not_null node can be used here since it is
  // referenced only on the non-null path, but it requires special handling;
  // see collect_null_checks():
  //
  //   decode_not_null narrow_oop_reg, oop_reg // 'shift' and 'add base'
  //   [oop_reg + offset]
  //   NullCheck oop_reg
  //
  // With a zero base, and when narrow oops cannot fold into the address
  // expression, use a normal implicit null check, since only a shift is
  // needed to decode the narrow oop:
  //
  //   decode narrow_oop_reg, oop_reg // only 'shift'
  //   [oop_reg + offset]
  //   NullCheck oop_reg
  //
  inline static bool gen_narrow_oop_implicit_null_checks() {
    return Universe::narrow_oop_use_implicit_null_checks() &&
           (narrow_oop_use_complex_address() ||
            Universe::narrow_oop_base() != NULL);
  }

  // Is it better to copy float constants, or load them directly from memory?
  // Intel can load a float constant from a direct address, requiring no
  // extra registers.  Most RISCs will have to materialize an address into a
  // register first, so they may as well materialize the constant immediately.
  static const bool rematerialize_float_constants;

  // If the CPU can load and store mis-aligned doubles directly then no
  // fixup is needed.  Else we split the double into 2 integer pieces and
  // move it piece-by-piece.  Only happens when passing doubles into C code
  // or when calling i2c adapters, as the Java calling convention forces
  // doubles to be aligned.
  static const bool misaligned_doubles_ok;

  // Does the CPU require postalloc expand (see block.cpp for description of
  // postalloc expand)?
  static const bool require_postalloc_expand;

  // Perform a platform dependent implicit null fixup.  This is needed
  // on windows95 to take care of some unusual register constraints.
  void pd_implicit_null_fixup(MachNode *load, uint idx);

  // Advertise here if the CPU requires explicit rounding operations
  // to implement the UseStrictFP mode.
  static const bool strict_fp_requires_explicit_rounding;

  // Are floats converted to doubles when stored to the stack during
  // deoptimization?
  static bool float_in_double();
  // Do ints take an entire long register or just half?
  static const bool int_in_long;

  // Do the processor's shift instructions only use the low 5/6 bits
  // of the count for 32/64-bit ints?  If not, we need to do the masking
  // ourselves.
  static const bool need_masked_shift_count;
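  // Illustrative consequence (a sketch, not from the original source): where
  // need_masked_shift_count is true, the count of a 32-bit shift must be
  // narrowed explicitly before use, since the hardware will not do it:
  //
  //   count = count & (BitsPerJavaInteger - 1);  // mask to 0..31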
  // This routine is run whenever a graph fails to match.
  // If it returns, the compiler should bail out to the interpreter without
  // error.  In non-product mode, SoftMatchFailure is false to detect
  // non-canonical graphs: print a message and exit.
  static void soft_match_failure() {
    if( SoftMatchFailure ) return;
    else { fatal("SoftMatchFailure is not allowed except in product"); }
  }

  // Check for a following volatile memory barrier without an intervening
  // load; in that case we don't need a barrier here, but we retain the
  // Node to act as a compiler ordering barrier.
  static bool post_store_load_barrier(const Node* mb);

  // Does n lead to an uncommon trap that can cause deoptimization?
  static bool branches_to_uncommon_trap(const Node *n);

#ifdef ASSERT
  void dump_old2new_map();      // machine-independent to machine-dependent

  Node* find_old_node(Node* new_node) {
    return _new2old_map[new_node->_idx];
  }
#endif
};

#endif // SHARE_VM_OPTO_MATCHER_HPP