/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_MATCHER_HPP
#define SHARE_VM_OPTO_MATCHER_HPP

#include "libadt/vectset.hpp"
#include "memory/resourceArea.hpp"
#include "opto/node.hpp"
#include "opto/phaseX.hpp"
#include "opto/regmask.hpp"

class Compile;
class Node;
class MachNode;
class MachTypeNode;
class MachOper;

//---------------------------Matcher-------------------------------------------
class Matcher : public PhaseTransform {
  friend class VMStructs;
  // Private arena of State objects
  ResourceArea _states_arena;

  VectorSet   _visited;         // Visit bits

  // Used to control the Label pass
  VectorSet   _shared;          // Shared Ideal Node
  VectorSet   _dontcare;        // Nothing the matcher cares about

  // Private methods which perform the actual matching and reduction
  // Walks the label tree, generating machine nodes
  MachNode *ReduceInst( State *s, int rule, Node *&mem);
  void ReduceInst_Chain_Rule( State *s, int rule, Node *&mem, MachNode *mach);
  uint ReduceInst_Interior(State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds);
  void ReduceOper( State *s, int newrule, Node *&mem, MachNode *mach );

  // If this node already matched using "rule", return the MachNode for it.
  MachNode* find_shared_node(Node* n, uint rule);

  // Convert a dense opcode number to an expanded rule number
  const int *_reduceOp;
  const int *_leftOp;
  const int *_rightOp;

  // Map dense opcode number to info on when rule is swallowed constant.
  const bool *_swallowed;

  // Map dense rule number to determine if this is an instruction chain rule
  const uint _begin_inst_chain_rule;
  const uint _end_inst_chain_rule;

  // We want to clone constants and possible CmpI-variants.
  // If we do not clone CmpI, then we can have many instances of
  // condition codes alive at once.  This is OK on some chips and
  // bad on others.  Hence the machine-dependent table lookup.
  const char *_must_clone;
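
  // Illustrative sketch only (the real test lives in the .cpp file): during
  // find_shared() the matcher is expected to consult this table by opcode,
  // roughly as below, so that e.g. an x86 Cmp feeding a Bool is re-matched
  // under every use and its condition codes are never live across unrelated
  // instructions.
  //
  //   if (_must_clone[n->Opcode()]) {
  //     // do not mark n as shared; clone it next to each user instead
  //   }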

  // Find shared Nodes, or Nodes that otherwise are Matcher roots
  void find_shared( Node *n );

  // Debug and profile information for nodes in old space:
  GrowableArray<Node_Notes*>* _old_node_note_array;

  // Node labeling iterator for instruction selection
  Node *Label_Root( const Node *n, State *svec, Node *control, const Node *mem );

  Node *transform( Node *dummy );

  Node_List &_proj_list;        // For Machine nodes killing many values

  Node_Array _shared_nodes;

  debug_only(Node_Array _old2new_map;)   // Map roots of ideal-trees to machine-roots
  debug_only(Node_Array _new2old_map;)   // Maps machine nodes back to ideal

  // Accessors for the inherited field PhaseTransform::_nodes:
  void  grow_new_node_array(uint idx_limit) {
    _nodes.map(idx_limit-1, NULL);
  }
  bool  has_new_node(const Node* n) const {
    return _nodes.at(n->_idx) != NULL;
  }
  Node* new_node(const Node* n) const {
    assert(has_new_node(n), "set before get");
    return _nodes.at(n->_idx);
  }
  void  set_new_node(const Node* n, Node *nn) {
    assert(!has_new_node(n), "set only once");
    _nodes.map(n->_idx, nn);
  }

#ifdef ASSERT
  // Make sure only new nodes are reachable from this node
  void verify_new_nodes_only(Node* root);

  Node* _mem_node;              // Ideal memory node consumed by mach node
#endif

  // Mach node for ConP #NULL
  MachNode* _mach_null;

public:
  int LabelRootDepth;
  static const int base2reg[];  // Map Types to machine register types
  // Convert ideal machine register to a register mask for spill-loads
  static const RegMask *idealreg2regmask[];
  RegMask *idealreg2spillmask  [_last_machine_leaf];
  RegMask *idealreg2debugmask  [_last_machine_leaf];
  RegMask *idealreg2mhdebugmask[_last_machine_leaf];
  void init_spill_mask( Node *ret );
  // Convert machine register number to register mask
  static uint mreg2regmask_max;
  static RegMask mreg2regmask[];
  static RegMask STACK_ONLY_mask;

  MachNode* mach_null() const { return _mach_null; }

  bool    is_shared( Node *n ) { return _shared.test(n->_idx) != 0; }
  void   set_shared( Node *n ) {        _shared.set(n->_idx); }
  bool   is_visited( Node *n ) { return _visited.test(n->_idx) != 0; }
  void  set_visited( Node *n ) {        _visited.set(n->_idx); }
  bool  is_dontcare( Node *n ) { return _dontcare.test(n->_idx) != 0; }
  void set_dontcare( Node *n ) {        _dontcare.set(n->_idx); }

  // Mode bit to tell DFA and expand rules whether we are running after
  // (or during) register selection.  Usually, the matcher runs before,
  // but it will also get called to generate post-allocation spill code.
  // In this situation, it is a deadly error to attempt to allocate more
  // temporary registers.
  bool _allocation_started;

  // Machine register names
  static const char *regName[];
  // Machine register encodings
  static const unsigned char _regEncode[];
  // Machine Node names
  const char **_ruleName;
  // Rules that are cheaper to rematerialize than to spill
  static const uint _begin_rematerialize;
  static const uint _end_rematerialize;
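
  // Illustrative sketch: the two bounds above describe a half-open range of
  // rule numbers, so a hypothetical query (not a member declared here) would
  // look like:
  //
  //   bool is_cheap_to_rematerialize(uint rule) {
  //     return rule >= _begin_rematerialize && rule < _end_rematerialize;
  //   }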

  // An array of chars, from 0 to _last_Mach_Reg.
  // No Save       = 'N' (for register windows)
  // Save on Entry = 'E'
  // Save on Call  = 'C'
  // Always Save   = 'A' (same as SOE + SOC)
  const char *_register_save_policy;
  const char *_c_reg_save_policy;
  // Convert a machine register to a machine register type, so as to
  // properly match spill code.
  const int *_register_save_type;
  // Maps from machine register to boolean; true if machine register can
  // be holding a call argument in some signature.
  static bool can_be_java_arg( int reg );
  // Maps from machine register to boolean; true if machine register holds
  // a spillable argument.
  static bool is_spillable_arg( int reg );

  // List of IfFalse or IfTrue Nodes that indicate a taken null test.
  // List is valid in the post-matching space.
  Node_List _null_check_tests;
  void collect_null_checks( Node *proj, Node *orig_proj );
  void validate_null_checks( );

  Matcher( Node_List &proj_list );

  // Select instructions for entire method
  void match( );
  // Helper for match
  OptoReg::Name warp_incoming_stk_arg( VMReg reg );

  // Transform, then walk.  Does implicit DCE while walking.
  // Name changed from "transform" to avoid it being virtual.
  Node *xform( Node *old_space_node, int Nodes );

  // Match a single Ideal Node - turn it into a 1-Node tree; Label & Reduce.
  MachNode *match_tree( const Node *n );
  MachNode *match_sfpt( SafePointNode *sfpt );
  // Helper for match_sfpt
  OptoReg::Name warp_outgoing_stk_arg( VMReg reg, OptoReg::Name begin_out_arg_area, OptoReg::Name &out_arg_limit_per_call );

  // Initialize first stack mask and related masks.
  void init_first_stack_mask();

  // If we should save-on-entry this register
  bool is_save_on_entry( int reg );

  // Fixup the save-on-entry registers
  void Fixup_Save_On_Entry( );

  // --- Frame handling ---

  // Register number of the stack slot corresponding to the incoming SP.
  // Per the Big Picture in the AD file, it is:
  //   SharedInfo::stack0 + locks + in_preserve_stack_slots + pad2.
  OptoReg::Name _old_SP;

  // Register number of the stack slot corresponding to the highest incoming
  // argument on the stack.  Per the Big Picture in the AD file, it is:
  //   _old_SP + out_preserve_stack_slots + incoming argument size.
  OptoReg::Name _in_arg_limit;

  // Register number of the stack slot corresponding to the new SP.
  // Per the Big Picture in the AD file, it is:
  //   _in_arg_limit + pad0
  OptoReg::Name _new_SP;

  // Register number of the stack slot corresponding to the highest outgoing
  // argument on the stack.  Per the Big Picture in the AD file, it is:
  //   _new_SP + max outgoing arguments of all calls
  OptoReg::Name _out_arg_limit;

  OptoRegPair *_parm_regs;           // Array of machine registers per argument
  RegMask *_calling_convention_mask; // Array of RegMasks per argument

  // Does matcher have a match rule for this ideal node?
  static const bool has_match_rule(int opcode);
  static const bool _hasMatchRule[_last_opcode];
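
  // Illustrative sketch: has_match_rule() is expected to be a straight table
  // lookup into _hasMatchRule above; the real definition is architecture and
  // ADLC dependent, so treat this only as a reading aid:
  //
  //   const bool Matcher::has_match_rule(int opcode) {
  //     return _hasMatchRule[opcode];   // bounds checking elided here
  //   }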

  // Does matcher have a match rule for this ideal node and is the
  // predicate (if there is one) true?
  // NOTE: If this function is used more commonly in the future, ADLC
  // should generate this one.
  static const bool match_rule_supported(int opcode);

  // Used to determine if we have fast l2f conversion
  // USII has it, USIII doesn't
  static const bool convL2FSupported(void);

  // Vector width in bytes
  static const uint vector_width_in_bytes(void);

  // Vector ideal reg
  static const uint vector_ideal_reg(void);

  // Used to determine a "low complexity" 64-bit constant.  (Zero is simple.)
  // The standard of comparison is one (StoreL ConL) vs. two (StoreI ConI).
  // Depends on the details of 64-bit constant generation on the CPU.
  static const bool isSimpleConstant64(jlong con);

  // These calls are all generated by the ADLC

  // TRUE - grows up, FALSE - grows down (Intel)
  virtual bool stack_direction() const;

  // Java-Java calling convention
  // (what you use when Java calls Java)

  // Alignment of stack in bytes, standard Intel word alignment is 4.
  // Sparc probably wants at least double-word (8).
  static uint stack_alignment_in_bytes();
  // Alignment of stack, measured in stack slots.
  // The size of stack slots is defined by VMRegImpl::stack_slot_size.
  static uint stack_alignment_in_slots() {
    return stack_alignment_in_bytes() / (VMRegImpl::stack_slot_size);
  }

  // Array mapping arguments to registers.  Argument 0 is usually the 'this'
  // pointer.  Registers can include stack-slots and regular registers.
  static void calling_convention( BasicType *, VMRegPair *, uint len, bool is_outgoing );
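
  // Illustrative sketch of a typical call (the local names sig_bt and regs
  // are made up for the example, not part of this interface):
  //
  //   BasicType sig_bt[] = { T_OBJECT /* receiver */, T_INT };
  //   VMRegPair regs[2];
  //   Matcher::calling_convention(sig_bt, regs, 2, /*is_outgoing*/ true);
  //   // regs[i] now names the register(s) or stack slot(s) assigned to
  //   // argument i under the Java-Java convention.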

  // Convert a sig into a calling convention register layout
  // and find interesting things about it.
  static OptoReg::Name  find_receiver( bool is_outgoing );
  // Return address register.  On Intel it is a stack-slot.  On PowerPC
  // it is the Link register.  On Sparc it is r31?
  virtual OptoReg::Name return_addr() const;
  RegMask              _return_addr_mask;
  // Return value register.  On Intel it is EAX.  On Sparc i0/o0.
  static OptoRegPair   return_value(int ideal_reg, bool is_outgoing);
  static OptoRegPair c_return_value(int ideal_reg, bool is_outgoing);
  RegMask              _return_value_mask;
  // Inline Cache Register
  static OptoReg::Name  inline_cache_reg();
  static const RegMask &inline_cache_reg_mask();
  static int            inline_cache_reg_encode();

  // Register for DIVI projection of divmodI
  static RegMask divI_proj_mask();
  // Register for MODI projection of divmodI
  static RegMask modI_proj_mask();

  // Register for DIVL projection of divmodL
  static RegMask divL_proj_mask();
  // Register for MODL projection of divmodL
  static RegMask modL_proj_mask();

  static const RegMask method_handle_invoke_SP_save_mask();

  // Java-Interpreter calling convention
  // (what you use when calling between compiled-Java and Interpreted-Java)

  // Number of callee-save + always-save registers
  // Ignores frame pointer and "special" registers
  static int  number_of_saved_registers();

  // The Method-klass-holder may be passed in the inline_cache_reg
  // and then expanded into the inline_cache_reg and a method_oop register

  static OptoReg::Name  interpreter_method_oop_reg();
  static const RegMask &interpreter_method_oop_reg_mask();
  static int            interpreter_method_oop_reg_encode();

  static OptoReg::Name  compiler_method_oop_reg();
  static const RegMask &compiler_method_oop_reg_mask();
  static int            compiler_method_oop_reg_encode();

  // Interpreter's Frame Pointer Register
  static OptoReg::Name  interpreter_frame_pointer_reg();
  static const RegMask &interpreter_frame_pointer_reg_mask();

  // Java-Native calling convention
  // (what you use when intercalling between Java and C++ code)

  // Array mapping arguments to registers.  Argument 0 is usually the 'this'
  // pointer.  Registers can include stack-slots and regular registers.
  static void c_calling_convention( BasicType*, VMRegPair *, uint );
  // Frame pointer.  The frame pointer is kept at the base of the stack
  // and so is probably the stack pointer for most machines.  On Intel
  // it is ESP.  On the PowerPC it is R1.  On Sparc it is SP.
  OptoReg::Name  c_frame_pointer() const;
  static RegMask c_frame_ptr_mask;

  // !!!!! Special stuff for building ScopeDescs
  virtual int regnum_to_fpu_offset(int regnum);

  // Is this branch offset small enough to be addressed by a short branch?
  bool is_short_branch_offset(int rule, int offset);

  // Optional scaling for the parameter to the ClearArray/CopyArray node.
  static const bool init_array_count_is_in_bytes;

  // Threshold small size (in bytes) for a ClearArray/CopyArray node.
  // Anything this size or smaller may get converted to discrete scalar stores.
  static const int init_array_short_size;

  // Should the Matcher clone shifts on addressing modes, expecting them to
  // be subsumed into complex addressing expressions or compute them into
  // registers?  True for Intel but false for most RISCs.
  static const bool clone_shift_expressions;
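
  // Illustrative sketch of the Ideal shapes this flag distinguishes (pseudo
  // IR, not code): with cloned shifts the scaled index folds into each
  // memory operand; otherwise it is matched once into a register and reused.
  //
  //   LoadL [base + (idx << 3) + off]   // Intel: shift subsumed by the
  //                                     // complex addressing mode
  //
  //   tmp = LShiftL idx, 3              // most RISCs: shift computed once
  //   LoadL [base + tmp + off]          // into a register and reused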

  static bool narrow_oop_use_complex_address();

  // Generate implicit null check for narrow oops if it can fold
  // into address expression (x64).
  //
  // [R12 + narrow_oop_reg<<3 + offset] // fold into address expression
  //   NullCheck narrow_oop_reg
  //
  // When narrow oops can't fold into the address expression (Sparc) and
  // the base is not null, use decode_not_null and a normal implicit null
  // check.  Note, the decode_not_null node can be used here since it is
  // referenced only on the non-null path, but it requires special handling,
  // see collect_null_checks():
  //
  // decode_not_null narrow_oop_reg, oop_reg // 'shift' and 'add base'
  // [oop_reg + offset]
  //   NullCheck oop_reg
  //
  // With a zero base, and when narrow oops cannot fold into the address
  // expression, use a normal implicit null check, since only a shift
  // is needed to decode the narrow oop.
  //
  // decode narrow_oop_reg, oop_reg // only 'shift'
  // [oop_reg + offset]
  //   NullCheck oop_reg
  //
  inline static bool gen_narrow_oop_implicit_null_checks() {
    return Universe::narrow_oop_use_implicit_null_checks() &&
           (narrow_oop_use_complex_address() ||
            Universe::narrow_oop_base() != NULL);
  }

  // Is it better to copy float constants, or load them directly from memory?
  // Intel can load a float constant from a direct address, requiring no
  // extra registers.  Most RISCs will have to materialize an address into a
  // register first, so they may as well materialize the constant immediately.
  static const bool rematerialize_float_constants;

  // If the CPU can load and store mis-aligned doubles directly then no fixup
  // is needed.  Else we split the double into 2 integer pieces and move it
  // piece-by-piece.  Only happens when passing doubles into C code or when
  // calling i2c adapters, as the Java calling convention forces doubles to be
  // aligned.
  static const bool misaligned_doubles_ok;

  // Perform a platform dependent implicit null fixup.  This is needed
  // on windows95 to take care of some unusual register constraints.
  void pd_implicit_null_fixup(MachNode *load, uint idx);

  // Advertise here if the CPU requires explicit rounding operations
  // to implement the UseStrictFP mode.
  static const bool strict_fp_requires_explicit_rounding;

  // Are floats converted to doubles when stored to the stack during
  // deoptimization?
  static bool float_in_double();
  // Do ints take an entire long register or just half?
  static const bool int_in_long;

  // This routine is run whenever a graph fails to match.
  // If it returns, the compiler should bailout to interpreter without error.
  // In non-product mode, SoftMatchFailure is false to detect non-canonical
  // graphs.  Print a message and exit.
  static void soft_match_failure() {
    if( SoftMatchFailure ) return;
    else { fatal("SoftMatchFailure is not allowed except in product"); }
  }

  // Used by the DFA in dfa_sparc.cpp.  Check for a prior FastLock
  // acting as an Acquire and thus we don't need an Acquire here.  We
  // retain the Node to act as a compiler ordering barrier.
  static bool prior_fast_lock( const Node *acq );

  // Used by the DFA in dfa_sparc.cpp.  Check for a following
  // FastUnLock acting as a Release and thus we don't need a Release
  // here.  We retain the Node to act as a compiler ordering barrier.
  static bool post_fast_unlock( const Node *rel );

  // Check for a following volatile memory barrier without an
  // intervening load and thus we don't need a barrier here.  We
  // retain the Node to act as a compiler ordering barrier.
  static bool post_store_load_barrier(const Node* mb);

#ifdef ASSERT
  void dump_old2new_map();      // machine-independent to machine-dependent

  Node* find_old_node(Node* new_node) {
    return _new2old_map[new_node->_idx];
  }
#endif
};

#endif // SHARE_VM_OPTO_MATCHER_HPP